Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig | 11
-rw-r--r--  security/Kconfig.hardening | 7
-rw-r--r--  security/Makefile | 4
-rw-r--r--  security/apparmor/Kconfig | 18
-rw-r--r--  security/apparmor/apparmorfs.c | 138
-rw-r--r--  security/apparmor/domain.c | 128
-rw-r--r--  security/apparmor/file.c | 49
-rw-r--r--  security/apparmor/include/apparmor.h | 1
-rw-r--r--  security/apparmor/include/file.h | 2
-rw-r--r--  security/apparmor/include/match.h | 3
-rw-r--r--  security/apparmor/include/path.h | 50
-rw-r--r--  security/apparmor/include/policy_unpack.h | 8
-rw-r--r--  security/apparmor/label.c | 12
-rw-r--r--  security/apparmor/lsm.c | 198
-rw-r--r--  security/apparmor/match.c | 6
-rw-r--r--  security/apparmor/mount.c | 67
-rw-r--r--  security/apparmor/policy.c | 9
-rw-r--r--  security/apparmor/policy_unpack.c | 120
-rw-r--r--  security/apparmor/policy_unpack_test.c | 607
-rw-r--r--  security/device_cgroup.c | 15
-rw-r--r--  security/integrity/Kconfig | 11
-rw-r--r--  security/integrity/Makefile | 10
-rw-r--r--  security/integrity/digsig.c | 43
-rw-r--r--  security/integrity/ima/Kconfig | 27
-rw-r--r--  security/integrity/ima/Makefile | 3
-rw-r--r--  security/integrity/ima/ima.h | 104
-rw-r--r--  security/integrity/ima/ima_api.c | 35
-rw-r--r--  security/integrity/ima/ima_appraise.c | 229
-rw-r--r--  security/integrity/ima/ima_asymmetric_keys.c | 66
-rw-r--r--  security/integrity/ima/ima_crypto.c | 14
-rw-r--r--  security/integrity/ima/ima_init.c | 8
-rw-r--r--  security/integrity/ima/ima_main.c | 149
-rw-r--r--  security/integrity/ima/ima_modsig.c | 168
-rw-r--r--  security/integrity/ima/ima_policy.c | 300
-rw-r--r--  security/integrity/ima/ima_queue_keys.c | 171
-rw-r--r--  security/integrity/ima/ima_template.c | 31
-rw-r--r--  security/integrity/ima/ima_template_lib.c | 64
-rw-r--r--  security/integrity/ima/ima_template_lib.h | 4
-rw-r--r--  security/integrity/integrity.h | 21
-rw-r--r--  security/integrity/platform_certs/keyring_handler.c | 80
-rw-r--r--  security/integrity/platform_certs/keyring_handler.h | 32
-rw-r--r--  security/integrity/platform_certs/load_powerpc.c | 96
-rw-r--r--  security/integrity/platform_certs/load_uefi.c | 72
-rw-r--r--  security/keys/Kconfig | 4
-rw-r--r--  security/keys/Makefile | 4
-rw-r--r--  security/keys/compat.c | 5
-rw-r--r--  security/keys/internal.h | 4
-rw-r--r--  security/keys/key.c | 10
-rw-r--r--  security/keys/request_key.c | 2
-rw-r--r--  security/keys/request_key_auth.c | 6
-rw-r--r--  security/keys/trusted-keys/Makefile | 8
-rw-r--r--  security/keys/trusted-keys/trusted_tpm1.c (renamed from security/keys/trusted.c) | 108
-rw-r--r--  security/keys/trusted-keys/trusted_tpm2.c | 315
-rw-r--r--  security/lockdown/Kconfig | 47
-rw-r--r--  security/lockdown/Makefile | 1
-rw-r--r--  security/lockdown/lockdown.c | 167
-rw-r--r--  security/lsm_audit.c | 5
-rw-r--r--  security/safesetid/securityfs.c | 7
-rw-r--r--  security/security.c | 121
-rw-r--r--  security/selinux/Kconfig | 33
-rw-r--r--  security/selinux/Makefile | 4
-rw-r--r--  security/selinux/avc.c | 95
-rw-r--r--  security/selinux/hooks.c | 526
-rw-r--r--  security/selinux/ibpkey.c | 2
-rw-r--r--  security/selinux/include/avc.h | 13
-rw-r--r--  security/selinux/include/classmap.h | 9
-rw-r--r--  security/selinux/include/ibpkey.h | 13
-rw-r--r--  security/selinux/include/objsec.h | 28
-rw-r--r--  security/selinux/include/security.h | 43
-rw-r--r--  security/selinux/netif.c | 33
-rw-r--r--  security/selinux/netnode.c | 32
-rw-r--r--  security/selinux/netport.c | 26
-rw-r--r--  security/selinux/nlmsgtab.c | 7
-rw-r--r--  security/selinux/selinuxfs.c | 91
-rw-r--r--  security/selinux/ss/context.h | 43
-rw-r--r--  security/selinux/ss/ebitmap.c | 18
-rw-r--r--  security/selinux/ss/ebitmap.h | 1
-rw-r--r--  security/selinux/ss/mls.c | 3
-rw-r--r--  security/selinux/ss/policydb.c | 418
-rw-r--r--  security/selinux/ss/policydb.h | 5
-rw-r--r--  security/selinux/ss/services.c | 327
-rw-r--r--  security/selinux/ss/services.h | 6
-rw-r--r--  security/selinux/ss/sidtab.c | 427
-rw-r--r--  security/selinux/ss/sidtab.h | 85
-rw-r--r--  security/smack/smack_access.c | 6
-rw-r--r--  security/smack/smack_lsm.c | 41
-rw-r--r--  security/tomoyo/common.c | 20
-rw-r--r--  security/tomoyo/domain.c | 15
-rw-r--r--  security/tomoyo/group.c | 9
-rw-r--r--  security/tomoyo/realpath.c | 32
-rw-r--r--  security/tomoyo/util.c | 6
91 files changed, 4981 insertions, 1441 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 0d65594b5196..2a1a2d396228 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -237,6 +237,7 @@ source "security/apparmor/Kconfig"
source "security/loadpin/Kconfig"
source "security/yama/Kconfig"
source "security/safesetid/Kconfig"
+source "security/lockdown/Kconfig"
source "security/integrity/Kconfig"
@@ -276,11 +277,11 @@ endchoice
config LSM
string "Ordered list of enabled LSMs"
- default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
- default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
- default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
- default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
- default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
+ default "lockdown,yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+ default "lockdown,yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+ default "lockdown,yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+ default "lockdown,yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
+ default "lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
help
A comma-separated list of LSMs, in initialization order.
Any LSMs left off this list will be ignored. This can be
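
The hunk above prepends "lockdown" to every default LSM stacking order so that its hooks register ahead of the other modules. As a quick way to see which LSMs actually initialized, and in what order, a small userspace C sketch can read the securityfs "lsm" file; the /sys/kernel/security/lsm path and a mounted securityfs are assumptions of this illustration, not something this patch adds:

/* lsm_order.c - print the comma-separated list of initialized LSMs.
 * Build: cc -o lsm_order lsm_order.c
 * Assumes securityfs is mounted at /sys/kernel/security.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/security/lsm", "r");
	char buf[256];

	if (!f) {
		perror("open /sys/kernel/security/lsm");
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("active LSMs (initialization order): %s", buf);
	fclose(f);
	return 0;
}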
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index a1ffe2eb4d5f..af4c979b38ee 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -61,6 +61,7 @@ choice
config GCC_PLUGIN_STRUCTLEAK_BYREF
bool "zero-init structs passed by reference (strong)"
depends on GCC_PLUGINS
+ depends on !(KASAN && KASAN_STACK=1)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any structures on the stack that may
@@ -70,9 +71,15 @@ choice
exposures, like CVE-2017-1000410:
https://git.kernel.org/linus/06e7e776ca4d3654
+ As a side-effect, this keeps a lot of variables on the
+ stack that can otherwise be optimized out, so combining
+ this with CONFIG_KASAN_STACK can lead to a stack overflow
+ and is disallowed.
+
config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
bool "zero-init anything passed by reference (very strong)"
depends on GCC_PLUGINS
+ depends on !(KASAN && KASAN_STACK=1)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any stack variables that may be passed
diff --git a/security/Makefile b/security/Makefile
index c598b904938f..746438499029 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -11,6 +11,7 @@ subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
subdir-$(CONFIG_SECURITY_SAFESETID) += safesetid
+subdir-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown
# always enable default capabilities
obj-y += commoncap.o
@@ -21,12 +22,13 @@ obj-$(CONFIG_SECURITY) += security.o
obj-$(CONFIG_SECURITYFS) += inode.o
obj-$(CONFIG_SECURITY_SELINUX) += selinux/
obj-$(CONFIG_SECURITY_SMACK) += smack/
-obj-$(CONFIG_AUDIT) += lsm_audit.o
+obj-$(CONFIG_SECURITY) += lsm_audit.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/
+obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index d8b1a360a636..0fe336860773 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -6,6 +6,8 @@ config SECURITY_APPARMOR
select SECURITY_PATH
select SECURITYFS
select SECURITY_NETWORK
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
default n
help
This enables the AppArmor security module.
@@ -66,3 +68,19 @@ config SECURITY_APPARMOR_DEBUG_MESSAGES
Set the default value of the apparmor.debug kernel parameter.
When enabled, various debug messages will be logged to
the kernel message buffer.
+
+config SECURITY_APPARMOR_KUNIT_TEST
+ bool "Build KUnit tests for policy_unpack.c"
+ depends on KUNIT=y && SECURITY_APPARMOR
+ help
+ This builds the AppArmor KUnit tests.
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running KUnit test harness and are not for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
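
The help text above describes tests that are compiled into the kernel and report results in TAP format at boot. For orientation, a generic KUnit suite skeleton of the kind policy_unpack_test.c (added later in this patch) builds on looks roughly like the sketch below; the "example" suite and case names are made up for illustration and are not part of the patch:

#include <kunit/test.h>

static void example_add_test(struct kunit *test)
{
	/* KUNIT_EXPECT_* macros record failures without aborting the suite */
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_add_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);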
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 45d13b6462aa..280741fc0f5f 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/poll.h>
+#include <linux/zlib.h>
#include <uapi/linux/major.h>
#include <uapi/linux/magic.h>
@@ -65,6 +66,35 @@
* support fns
*/
+struct rawdata_f_data {
+ struct aa_loaddata *loaddata;
+};
+
+#define RAWDATA_F_DATA_BUF(p) (char *)(p + 1)
+
+static void rawdata_f_data_free(struct rawdata_f_data *private)
+{
+ if (!private)
+ return;
+
+ aa_put_loaddata(private->loaddata);
+ kvfree(private);
+}
+
+static struct rawdata_f_data *rawdata_f_data_alloc(size_t size)
+{
+ struct rawdata_f_data *ret;
+
+ if (size > SIZE_MAX - sizeof(*ret))
+ return ERR_PTR(-EINVAL);
+
+ ret = kvzalloc(sizeof(*ret) + size, GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+
/**
* aa_mangle_name - mangle a profile name to std profile layout form
* @name: profile name to mangle (NOT NULL)
@@ -593,7 +623,7 @@ static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
void __aa_bump_ns_revision(struct aa_ns *ns)
{
- ns->revision++;
+ WRITE_ONCE(ns->revision, ns->revision + 1);
wake_up_interruptible(&ns->wait);
}
@@ -1280,36 +1310,117 @@ static int seq_rawdata_hash_show(struct seq_file *seq, void *v)
return 0;
}
+static int seq_rawdata_compressed_size_show(struct seq_file *seq, void *v)
+{
+ struct aa_loaddata *data = seq->private;
+
+ seq_printf(seq, "%zu\n", data->compressed_size);
+
+ return 0;
+}
+
SEQ_RAWDATA_FOPS(abi);
SEQ_RAWDATA_FOPS(revision);
SEQ_RAWDATA_FOPS(hash);
+SEQ_RAWDATA_FOPS(compressed_size);
+
+static int deflate_decompress(char *src, size_t slen, char *dst, size_t dlen)
+{
+ int error;
+ struct z_stream_s strm;
+
+ if (aa_g_rawdata_compression_level == 0) {
+ if (dlen < slen)
+ return -EINVAL;
+ memcpy(dst, src, slen);
+ return 0;
+ }
+
+ memset(&strm, 0, sizeof(strm));
+
+ strm.workspace = kvzalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
+
+ strm.next_in = src;
+ strm.avail_in = slen;
+
+ error = zlib_inflateInit(&strm);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_inflate_init;
+ }
+
+ strm.next_out = dst;
+ strm.avail_out = dlen;
+
+ error = zlib_inflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END)
+ error = -EINVAL;
+ else
+ error = 0;
+
+ zlib_inflateEnd(&strm);
+fail_inflate_init:
+ kvfree(strm.workspace);
+ return error;
+}
static ssize_t rawdata_read(struct file *file, char __user *buf, size_t size,
loff_t *ppos)
{
- struct aa_loaddata *rawdata = file->private_data;
+ struct rawdata_f_data *private = file->private_data;
- return simple_read_from_buffer(buf, size, ppos, rawdata->data,
- rawdata->size);
+ return simple_read_from_buffer(buf, size, ppos,
+ RAWDATA_F_DATA_BUF(private),
+ private->loaddata->size);
}
static int rawdata_release(struct inode *inode, struct file *file)
{
- aa_put_loaddata(file->private_data);
+ rawdata_f_data_free(file->private_data);
return 0;
}
static int rawdata_open(struct inode *inode, struct file *file)
{
+ int error;
+ struct aa_loaddata *loaddata;
+ struct rawdata_f_data *private;
+
if (!policy_view_capable(NULL))
return -EACCES;
- file->private_data = __aa_get_loaddata(inode->i_private);
- if (!file->private_data)
+
+ loaddata = __aa_get_loaddata(inode->i_private);
+ if (!loaddata)
/* lost race: this entry is being reaped */
return -ENOENT;
+ private = rawdata_f_data_alloc(loaddata->size);
+ if (IS_ERR(private)) {
+ error = PTR_ERR(private);
+ goto fail_private_alloc;
+ }
+
+ private->loaddata = loaddata;
+
+ error = deflate_decompress(loaddata->data, loaddata->compressed_size,
+ RAWDATA_F_DATA_BUF(private),
+ loaddata->size);
+ if (error)
+ goto fail_decompress;
+
+ file->private_data = private;
return 0;
+
+fail_decompress:
+ rawdata_f_data_free(private);
+ return error;
+
+fail_private_alloc:
+ aa_put_loaddata(loaddata);
+ return error;
}
static const struct file_operations rawdata_fops = {
@@ -1388,6 +1499,13 @@ int __aa_fs_create_rawdata(struct aa_ns *ns, struct aa_loaddata *rawdata)
rawdata->dents[AAFS_LOADDATA_HASH] = dent;
}
+ dent = aafs_create_file("compressed_size", S_IFREG | 0444, dir,
+ rawdata,
+ &seq_rawdata_compressed_size_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ rawdata->dents[AAFS_LOADDATA_COMPRESSED_SIZE] = dent;
+
dent = aafs_create_file("raw_data", S_IFREG | 0444,
dir, rawdata, &rawdata_fops);
if (IS_ERR(dent))
@@ -2455,16 +2573,18 @@ static const char *policy_get_link(struct dentry *dentry,
{
struct aa_ns *ns;
struct path path;
+ int error;
if (!dentry)
return ERR_PTR(-ECHILD);
+
ns = aa_get_current_ns();
path.mnt = mntget(aafs_mnt);
path.dentry = dget(ns_dir(ns));
- nd_jump_link(&path);
+ error = nd_jump_link(&path);
aa_put_ns(ns);
- return NULL;
+ return ERR_PTR(error);
}
static int policy_readlink(struct dentry *dentry, char __user *buffer,
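
The deflate_decompress() helper added above inflates the stored policy blob back to its original size before it is exposed through raw_data. The same round trip can be sketched in ordinary userspace C with the zlib convenience API; this is an illustration using compress2()/uncompress(), not the kernel zlib_* interface the patch uses:

/* deflate_roundtrip.c - compress a buffer and inflate it back with zlib.
 * Build: cc -o deflate_roundtrip deflate_roundtrip.c -lz
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <zlib.h>

int main(void)
{
	const char src[] = "profile data would go here";
	uLong slen = sizeof(src);
	uLong clen = compressBound(slen);	/* worst-case compressed size */
	unsigned char *cbuf = malloc(clen);
	unsigned char *dbuf = malloc(slen);
	uLong dlen = slen;

	if (!cbuf || !dbuf)
		return EXIT_FAILURE;

	/* Z_DEFAULT_COMPRESSION mirrors the default rawdata compression level */
	if (compress2(cbuf, &clen, (const Bytef *)src, slen,
		      Z_DEFAULT_COMPRESSION) != Z_OK)
		return EXIT_FAILURE;

	/* inflate back into a buffer sized to the known original length */
	if (uncompress(dbuf, &dlen, cbuf, clen) != Z_OK)
		return EXIT_FAILURE;

	printf("original %lu bytes, compressed %lu bytes, match: %s\n",
	       slen, clen, memcmp(src, dbuf, slen) == 0 ? "yes" : "no");
	free(cbuf);
	free(dbuf);
	return 0;
}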
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 9e0492795267..6ceb74e0f789 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -317,6 +317,7 @@ static int aa_xattrs_match(const struct linux_binprm *bprm,
if (!bprm || !profile->xattr_count)
return 0;
+ might_sleep();
/* transition from exec match to xattr set */
state = aa_dfa_null_transition(profile->xmatch, state);
@@ -361,10 +362,11 @@ out:
}
/**
- * __attach_match_ - find an attachment match
+ * find_attach - do attachment search for unconfined processes
* @bprm - binprm structure of transitioning task
- * @name - to match against (NOT NULL)
+ * @ns: the current namespace (NOT NULL)
* @head - profile list to walk (NOT NULL)
+ * @name - to match against (NOT NULL)
* @info - info message if there was an error (NOT NULL)
*
* Do a linear search on the profiles in the list. There is a matching
@@ -374,12 +376,11 @@ out:
*
* Requires: @head not be shared or have appropriate locks held
*
- * Returns: profile or NULL if no match found
+ * Returns: label or NULL if no match found
*/
-static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
- const char *name,
- struct list_head *head,
- const char **info)
+static struct aa_label *find_attach(const struct linux_binprm *bprm,
+ struct aa_ns *ns, struct list_head *head,
+ const char *name, const char **info)
{
int candidate_len = 0, candidate_xattrs = 0;
bool conflict = false;
@@ -388,6 +389,8 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
AA_BUG(!name);
AA_BUG(!head);
+ rcu_read_lock();
+restart:
list_for_each_entry_rcu(profile, head, base.list) {
if (profile->label.flags & FLAG_NULL &&
&profile->label == ns_unconfined(profile->ns))
@@ -413,16 +416,32 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
perm = dfa_user_allow(profile->xmatch, state);
/* any accepting state means a valid match. */
if (perm & MAY_EXEC) {
- int ret;
+ int ret = 0;
if (count < candidate_len)
continue;
- ret = aa_xattrs_match(bprm, profile, state);
- /* Fail matching if the xattrs don't match */
- if (ret < 0)
- continue;
-
+ if (bprm && profile->xattr_count) {
+ long rev = READ_ONCE(ns->revision);
+
+ if (!aa_get_profile_not0(profile))
+ goto restart;
+ rcu_read_unlock();
+ ret = aa_xattrs_match(bprm, profile,
+ state);
+ rcu_read_lock();
+ aa_put_profile(profile);
+ if (rev !=
+ READ_ONCE(ns->revision))
+ /* policy changed */
+ goto restart;
+ /*
+ * Fail matching if the xattrs don't
+ * match
+ */
+ if (ret < 0)
+ continue;
+ }
/*
* TODO: allow for more flexible best match
*
@@ -445,43 +464,28 @@ static struct aa_profile *__attach_match(const struct linux_binprm *bprm,
candidate_xattrs = ret;
conflict = false;
}
- } else if (!strcmp(profile->base.name, name))
+ } else if (!strcmp(profile->base.name, name)) {
/*
* old exact non-re match, without conditionals such
* as xattrs. no more searching required
*/
- return profile;
+ candidate = profile;
+ goto out;
+ }
}
- if (conflict) {
- *info = "conflicting profile attachments";
+ if (!candidate || conflict) {
+ if (conflict)
+ *info = "conflicting profile attachments";
+ rcu_read_unlock();
return NULL;
}
- return candidate;
-}
-
-/**
- * find_attach - do attachment search for unconfined processes
- * @bprm - binprm structure of transitioning task
- * @ns: the current namespace (NOT NULL)
- * @list: list to search (NOT NULL)
- * @name: the executable name to match against (NOT NULL)
- * @info: info message if there was an error
- *
- * Returns: label or NULL if no match found
- */
-static struct aa_label *find_attach(const struct linux_binprm *bprm,
- struct aa_ns *ns, struct list_head *list,
- const char *name, const char **info)
-{
- struct aa_profile *profile;
-
- rcu_read_lock();
- profile = aa_get_profile(__attach_match(bprm, name, list, info));
+out:
+ candidate = aa_get_newest_profile(candidate);
rcu_read_unlock();
- return profile ? &profile->label : NULL;
+ return &candidate->label;
}
static const char *next_name(int xtype, const char *name)
@@ -520,7 +524,7 @@ struct aa_label *x_table_lookup(struct aa_profile *profile, u32 xindex,
label = &new_profile->label;
continue;
}
- label = aa_label_parse(&profile->label, *name, GFP_ATOMIC,
+ label = aa_label_parse(&profile->label, *name, GFP_KERNEL,
true, false);
if (IS_ERR(label))
label = NULL;
@@ -600,7 +604,7 @@ static struct aa_label *x_to_label(struct aa_profile *profile,
/* base the stack on post domain transition */
struct aa_label *base = new;
- new = aa_label_parse(base, stack, GFP_ATOMIC, true, false);
+ new = aa_label_parse(base, stack, GFP_KERNEL, true, false);
if (IS_ERR(new))
new = NULL;
aa_put_label(base);
@@ -685,20 +689,9 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
struct aa_profile *new_profile = NULL;
- char *n = kstrdup(name, GFP_ATOMIC);
-
- if (n) {
- /* name is ptr into buffer */
- long pos = name - buffer;
- /* break per cpu buffer hold */
- put_buffers(buffer);
- new_profile = aa_new_null_profile(profile, false, n,
- GFP_KERNEL);
- get_buffers(buffer);
- name = buffer + pos;
- strcpy((char *)name, n);
- kfree(n);
- }
+
+ new_profile = aa_new_null_profile(profile, false, name,
+ GFP_KERNEL);
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
@@ -719,7 +712,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile,
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment variables"
" for %s profile=", name);
- aa_label_printk(new, GFP_ATOMIC);
+ aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
@@ -795,7 +788,7 @@ static int profile_onexec(struct aa_profile *profile, struct aa_label *onexec,
if (DEBUG_ON) {
dbg_printk("apparmor: scrubbing environment "
"variables for %s label=", xname);
- aa_label_printk(onexec, GFP_ATOMIC);
+ aa_label_printk(onexec, GFP_KERNEL);
dbg_printk("\n");
}
*secure_exec = true;
@@ -829,7 +822,7 @@ static struct aa_label *handle_onexec(struct aa_label *label,
bprm, buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
- new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_get_newest_label(onexec),
profile_transition(profile, bprm, buffer,
cond, unsafe));
@@ -841,9 +834,9 @@ static struct aa_label *handle_onexec(struct aa_label *label,
buffer, cond, unsafe));
if (error)
return ERR_PTR(error);
- new = fn_label_build_in_ns(label, profile, GFP_ATOMIC,
+ new = fn_label_build_in_ns(label, profile, GFP_KERNEL,
aa_label_merge(&profile->label, onexec,
- GFP_ATOMIC),
+ GFP_KERNEL),
profile_transition(profile, bprm, buffer,
cond, unsafe));
}
@@ -903,13 +896,18 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
ctx->nnp = aa_get_label(label);
/* buffer freed below, name is pointer into buffer */
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto done;
+ }
+
/* Test for onexec first as onexec override other x transitions. */
if (ctx->onexec)
new = handle_onexec(label, ctx->onexec, ctx->token,
bprm, buffer, &cond, &unsafe);
else
- new = fn_label_build(label, profile, GFP_ATOMIC,
+ new = fn_label_build(label, profile, GFP_KERNEL,
profile_transition(profile, bprm, buffer,
&cond, &unsafe));
@@ -953,7 +951,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (DEBUG_ON) {
dbg_printk("scrubbing environment variables for %s "
"label=", bprm->filename);
- aa_label_printk(new, GFP_ATOMIC);
+ aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->secureexec = 1;
@@ -964,7 +962,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (DEBUG_ON) {
dbg_printk("apparmor: clearing unsafe personality "
"bits. %s label=", bprm->filename);
- aa_label_printk(new, GFP_ATOMIC);
+ aa_label_printk(new, GFP_KERNEL);
dbg_printk("\n");
}
bprm->per_clear |= PER_CLEAR_ON_SETID;
@@ -975,7 +973,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
done:
aa_put_label(label);
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 4c1b05eb130c..f1caf3674e1c 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -76,7 +76,7 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
if (aad(sa)->peer) {
audit_log_format(ab, " target=");
aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
- FLAG_VIEW_SUBNS, GFP_ATOMIC);
+ FLAG_VIEW_SUBNS, GFP_KERNEL);
} else if (aad(sa)->fs.target) {
audit_log_format(ab, " target=");
audit_log_untrustedstring(ab, aad(sa)->fs.target);
@@ -332,12 +332,14 @@ int aa_path_perm(const char *op, struct aa_label *label,
flags |= PATH_DELEGATE_DELETED | (S_ISDIR(cond->mode) ? PATH_IS_DIR :
0);
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
error = fn_for_each_confined(label, profile,
profile_path_perm(op, profile, path, buffer, request,
cond, flags, &perms));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -475,12 +477,18 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
int error;
/* buffer freed below, lname is pointer in buffer */
- get_buffers(buffer, buffer2);
+ buffer = aa_get_buffer(false);
+ buffer2 = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !buffer2)
+ goto out;
+
error = fn_for_each_confined(label, profile,
profile_path_link(profile, &link, buffer, &target,
buffer2, &cond));
- put_buffers(buffer, buffer2);
-
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(buffer2);
return error;
}
@@ -507,7 +515,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
static int __file_path_perm(const char *op, struct aa_label *label,
struct aa_label *flabel, struct file *file,
- u32 request, u32 denied)
+ u32 request, u32 denied, bool in_atomic)
{
struct aa_profile *profile;
struct aa_perms perms = {};
@@ -524,7 +532,9 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return 0;
flags = PATH_DELEGATE_DELETED | (S_ISDIR(cond.mode) ? PATH_IS_DIR : 0);
- get_buffers(buffer);
+ buffer = aa_get_buffer(in_atomic);
+ if (!buffer)
+ return -ENOMEM;
/* check every profile in task label not in current cache */
error = fn_for_each_not_in_set(flabel, label, profile,
@@ -553,7 +563,7 @@ static int __file_path_perm(const char *op, struct aa_label *label,
if (!error)
update_file_ctx(file_ctx(file), label, request);
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -590,11 +600,12 @@ static int __file_sock_perm(const char *op, struct aa_label *label,
* @label: label being enforced (NOT NULL)
* @file: file to revalidate access permissions on (NOT NULL)
* @request: requested permissions
+ * @in_atomic: whether allocations need to be done in atomic context
*
* Returns: %0 if access allowed else error
*/
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
- u32 request)
+ u32 request, bool in_atomic)
{
struct aa_file_ctx *fctx;
struct aa_label *flabel;
@@ -619,21 +630,25 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
*/
denied = request & ~fctx->allow;
if (unconfined(label) || unconfined(flabel) ||
- (!denied && aa_label_is_subset(flabel, label)))
+ (!denied && aa_label_is_subset(flabel, label))) {
+ rcu_read_unlock();
goto done;
+ }
+ flabel = aa_get_newest_label(flabel);
+ rcu_read_unlock();
/* TODO: label cross check */
if (file->f_path.mnt && path_mediated_fs(file->f_path.dentry))
error = __file_path_perm(op, label, flabel, file, request,
- denied);
+ denied, in_atomic);
else if (S_ISSOCK(file_inode(file)->i_mode))
error = __file_sock_perm(op, label, flabel, file, request,
denied);
-done:
- rcu_read_unlock();
+ aa_put_label(flabel);
+done:
return error;
}
@@ -655,7 +670,8 @@ static void revalidate_tty(struct aa_label *label)
struct tty_file_private, list);
file = file_priv->file;
- if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE))
+ if (aa_file_perm(OP_INHERIT, label, file, MAY_READ | MAY_WRITE,
+ IN_ATOMIC))
drop_tty = 1;
}
spin_unlock(&tty->files_lock);
@@ -669,7 +685,8 @@ static int match_file(const void *p, struct file *file, unsigned int fd)
{
struct aa_label *label = (struct aa_label *)p;
- if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file)))
+ if (aa_file_perm(OP_INHERIT, label, file, aa_map_file_to_perms(file),
+ IN_ATOMIC))
return fd + 1;
return 0;
}
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 6b7e6e13176e..1fbabdb565a8 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -36,6 +36,7 @@ extern enum audit_mode aa_g_audit;
extern bool aa_g_audit_header;
extern bool aa_g_debug;
extern bool aa_g_hash_policy;
+extern int aa_g_rawdata_compression_level;
extern bool aa_g_lock_policy;
extern bool aa_g_logsyscall;
extern bool aa_g_paranoid_load;
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index a852be89a7dc..aff26fc71407 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -197,7 +197,7 @@ int aa_path_link(struct aa_label *label, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry);
int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
- u32 request);
+ u32 request, bool in_atomic);
void aa_inherit_files(const struct cred *cred, struct files_struct *files);
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 6b0af638a18d..e23f4aadc1ff 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -134,7 +134,7 @@ unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
void aa_dfa_free_kref(struct kref *kref);
-#define WB_HISTORY_SIZE 8
+#define WB_HISTORY_SIZE 24
struct match_workbuf {
unsigned int count;
unsigned int pos;
@@ -147,7 +147,6 @@ struct match_workbuf N = { \
.count = 0, \
.pos = 0, \
.len = 0, \
- .size = WB_HISTORY_SIZE, \
}
unsigned int aa_dfa_leftmatch(struct aa_dfa *dfa, unsigned int start,
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 35a8295e8f3a..44a7945fbe3c 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -11,7 +11,6 @@
#ifndef __AA_PATH_H
#define __AA_PATH_H
-
enum path_flags {
PATH_IS_DIR = 0x1, /* path is a directory */
PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */
@@ -26,51 +25,8 @@ int aa_path_name(const struct path *path, int flags, char *buffer,
const char **name, const char **info,
const char *disconnected);
-#define MAX_PATH_BUFFERS 2
-
-/* Per cpu buffers used during mediation */
-/* preallocated buffers to use during path lookups */
-struct aa_buffers {
- char *buf[MAX_PATH_BUFFERS];
-};
-
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-
-DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
-
-#define ASSIGN(FN, A, X, N) ((X) = FN(A, N))
-#define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/
-#define EVAL2(FN, A, X, Y...) \
- do { ASSIGN(FN, A, X, 1); EVAL1(FN, A, Y); } while (0)
-#define EVAL(FN, A, X...) CONCATENATE(EVAL, COUNT_ARGS(X))(FN, A, X)
-
-#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
-
-#ifdef CONFIG_DEBUG_PREEMPT
-#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
-#else
-#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
-#endif
-
-#define __get_buffer(C, N) ({ \
- AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
- (C)->buf[(N)]; })
-
-#define __get_buffers(C, X...) EVAL(__get_buffer, C, X)
-
-#define __put_buffers(X, Y...) ((void)&(X))
-
-#define get_buffers(X...) \
-do { \
- struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers); \
- __get_buffers(__cpu_var, X); \
-} while (0)
-
-#define put_buffers(X, Y...) \
-do { \
- __put_buffers(X, Y); \
- put_cpu_ptr(&aa_buffers); \
-} while (0)
+#define IN_ATOMIC true
+char *aa_get_buffer(bool in_atomic);
+void aa_put_buffer(char *buf);
#endif /* __AA_PATH_H */
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
index 46aefae918f5..e0e1ca7ebc38 100644
--- a/security/apparmor/include/policy_unpack.h
+++ b/security/apparmor/include/policy_unpack.h
@@ -41,6 +41,7 @@ enum {
AAFS_LOADDATA_REVISION,
AAFS_LOADDATA_HASH,
AAFS_LOADDATA_DATA,
+ AAFS_LOADDATA_COMPRESSED_SIZE,
AAFS_LOADDATA_DIR, /* must be last actual entry */
AAFS_LOADDATA_NDENTS /* count of entries */
};
@@ -61,11 +62,16 @@ struct aa_loaddata {
struct dentry *dents[AAFS_LOADDATA_NDENTS];
struct aa_ns *ns;
char *name;
- size_t size;
+ size_t size; /* the original size of the payload */
+ size_t compressed_size; /* the compressed size of the payload */
long revision; /* the ns policy revision this caused */
int abi;
unsigned char *hash;
+ /* Pointer to payload. If @compressed_size > 0, then this is the
+ * compressed version of the payload, else it is the uncompressed
+ * version (with the size indicated by @size).
+ */
char *data;
};
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
index 59f1cc2557a7..470693239e64 100644
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -1458,11 +1458,13 @@ static inline bool use_label_hname(struct aa_ns *ns, struct aa_label *label,
/* helper macro for snprint routines */
#define update_for_len(total, len, size, str) \
do { \
+ size_t ulen = len; \
+ \
AA_BUG(len < 0); \
- total += len; \
- len = min(len, size); \
- size -= len; \
- str += len; \
+ total += ulen; \
+ ulen = min(ulen, size); \
+ size -= ulen; \
+ str += ulen; \
} while (0)
/**
@@ -1597,7 +1599,7 @@ int aa_label_snxprint(char *str, size_t size, struct aa_ns *ns,
struct aa_ns *prev_ns = NULL;
struct label_it i;
int count = 0, total = 0;
- size_t len;
+ ssize_t len;
AA_BUG(!str && size != 0);
AA_BUG(!label);
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ec3a928af829..b621ad74f54a 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -21,6 +21,7 @@
#include <linux/user_namespace.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/zlib.h>
#include <net/sock.h>
#include <uapi/linux/mount.h>
@@ -43,8 +44,17 @@
/* Flag indicating whether initialization completed */
int apparmor_initialized;
-DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
+union aa_buffer {
+ struct list_head list;
+ char buffer[1];
+};
+
+#define RESERVE_COUNT 2
+static int reserve_count = RESERVE_COUNT;
+static int buffer_count;
+static LIST_HEAD(aa_global_buffers);
+static DEFINE_SPINLOCK(aa_buffers_lock);
/*
* LSM hook functions
@@ -442,7 +452,8 @@ static void apparmor_file_free_security(struct file *file)
aa_put_label(rcu_access_pointer(ctx->label));
}
-static int common_file_perm(const char *op, struct file *file, u32 mask)
+static int common_file_perm(const char *op, struct file *file, u32 mask,
+ bool in_atomic)
{
struct aa_label *label;
int error = 0;
@@ -452,7 +463,7 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
return -EACCES;
label = __begin_current_label_crit_section();
- error = aa_file_perm(op, label, file, mask);
+ error = aa_file_perm(op, label, file, mask, in_atomic);
__end_current_label_crit_section(label);
return error;
@@ -460,12 +471,13 @@ static int common_file_perm(const char *op, struct file *file, u32 mask)
static int apparmor_file_receive(struct file *file)
{
- return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file));
+ return common_file_perm(OP_FRECEIVE, file, aa_map_file_to_perms(file),
+ false);
}
static int apparmor_file_permission(struct file *file, int mask)
{
- return common_file_perm(OP_FPERM, file, mask);
+ return common_file_perm(OP_FPERM, file, mask, false);
}
static int apparmor_file_lock(struct file *file, unsigned int cmd)
@@ -475,11 +487,11 @@ static int apparmor_file_lock(struct file *file, unsigned int cmd)
if (cmd == F_WRLCK)
mask |= MAY_WRITE;
- return common_file_perm(OP_FLOCK, file, mask);
+ return common_file_perm(OP_FLOCK, file, mask, false);
}
static int common_mmap(const char *op, struct file *file, unsigned long prot,
- unsigned long flags)
+ unsigned long flags, bool in_atomic)
{
int mask = 0;
@@ -497,20 +509,21 @@ static int common_mmap(const char *op, struct file *file, unsigned long prot,
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
- return common_file_perm(op, file, mask);
+ return common_file_perm(op, file, mask, in_atomic);
}
static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
{
- return common_mmap(OP_FMMAP, file, prot, flags);
+ return common_mmap(OP_FMMAP, file, prot, flags, GFP_ATOMIC);
}
static int apparmor_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot, unsigned long prot)
{
return common_mmap(OP_FMPROT, vma->vm_file, prot,
- !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
+ !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0,
+ false);
}
static int apparmor_sb_mount(const char *dev_name, const struct path *path,
@@ -1262,6 +1275,16 @@ static const struct kernel_param_ops param_ops_aauint = {
.get = param_get_aauint
};
+static int param_set_aacompressionlevel(const char *val,
+ const struct kernel_param *kp);
+static int param_get_aacompressionlevel(char *buffer,
+ const struct kernel_param *kp);
+#define param_check_aacompressionlevel param_check_int
+static const struct kernel_param_ops param_ops_aacompressionlevel = {
+ .set = param_set_aacompressionlevel,
+ .get = param_get_aacompressionlevel
+};
+
static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
@@ -1292,6 +1315,11 @@ bool aa_g_hash_policy = IS_ENABLED(CONFIG_SECURITY_APPARMOR_HASH_DEFAULT);
module_param_named(hash_policy, aa_g_hash_policy, aabool, S_IRUSR | S_IWUSR);
#endif
+/* policy loaddata compression level */
+int aa_g_rawdata_compression_level = Z_DEFAULT_COMPRESSION;
+module_param_named(rawdata_compression_level, aa_g_rawdata_compression_level,
+ aacompressionlevel, 0400);
+
/* Debug mode */
bool aa_g_debug = IS_ENABLED(CONFIG_SECURITY_APPARMOR_DEBUG_MESSAGES);
module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
@@ -1402,6 +1430,7 @@ static int param_set_aauint(const char *val, const struct kernel_param *kp)
return -EPERM;
error = param_set_uint(val, kp);
+ aa_g_path_max = max_t(uint32_t, aa_g_path_max, sizeof(union aa_buffer));
pr_info("AppArmor: buffer size set to %d bytes\n", aa_g_path_max);
return error;
@@ -1456,6 +1485,37 @@ static int param_get_aaintbool(char *buffer, const struct kernel_param *kp)
return param_get_bool(buffer, &kp_local);
}
+static int param_set_aacompressionlevel(const char *val,
+ const struct kernel_param *kp)
+{
+ int error;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized)
+ return -EPERM;
+
+ error = param_set_int(val, kp);
+
+ aa_g_rawdata_compression_level = clamp(aa_g_rawdata_compression_level,
+ Z_NO_COMPRESSION,
+ Z_BEST_COMPRESSION);
+ pr_info("AppArmor: policy rawdata compression level set to %u\n",
+ aa_g_rawdata_compression_level);
+
+ return error;
+}
+
+static int param_get_aacompressionlevel(char *buffer,
+ const struct kernel_param *kp)
+{
+ if (!apparmor_enabled)
+ return -EINVAL;
+ if (apparmor_initialized && !policy_view_capable(NULL))
+ return -EPERM;
+ return param_get_int(buffer, kp);
+}
+
static int param_get_audit(char *buffer, const struct kernel_param *kp)
{
if (!apparmor_enabled)
@@ -1514,6 +1574,61 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
return 0;
}
+char *aa_get_buffer(bool in_atomic)
+{
+ union aa_buffer *aa_buf;
+ bool try_again = true;
+ gfp_t flags = (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
+retry:
+ spin_lock(&aa_buffers_lock);
+ if (buffer_count > reserve_count ||
+ (in_atomic && !list_empty(&aa_global_buffers))) {
+ aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+ list);
+ list_del(&aa_buf->list);
+ buffer_count--;
+ spin_unlock(&aa_buffers_lock);
+ return &aa_buf->buffer[0];
+ }
+ if (in_atomic) {
+ /*
+ * out of reserve buffers and in atomic context so increase
+ * how many buffers to keep in reserve
+ */
+ reserve_count++;
+ flags = GFP_ATOMIC;
+ }
+ spin_unlock(&aa_buffers_lock);
+
+ if (!in_atomic)
+ might_sleep();
+ aa_buf = kmalloc(aa_g_path_max, flags);
+ if (!aa_buf) {
+ if (try_again) {
+ try_again = false;
+ goto retry;
+ }
+ pr_warn_once("AppArmor: Failed to allocate a memory buffer.\n");
+ return NULL;
+ }
+ return &aa_buf->buffer[0];
+}
+
+void aa_put_buffer(char *buf)
+{
+ union aa_buffer *aa_buf;
+
+ if (!buf)
+ return;
+ aa_buf = container_of(buf, union aa_buffer, buffer[0]);
+
+ spin_lock(&aa_buffers_lock);
+ list_add(&aa_buf->list, &aa_global_buffers);
+ buffer_count++;
+ spin_unlock(&aa_buffers_lock);
+}
+
/*
* AppArmor init functions
*/
@@ -1525,7 +1640,7 @@ static int param_set_mode(const char *val, const struct kernel_param *kp)
*/
static int __init set_init_ctx(void)
{
- struct cred *cred = (struct cred *)current->real_cred;
+ struct cred *cred = (__force struct cred *)current->real_cred;
set_cred_label(cred, aa_get_label(ns_unconfined(root_ns)));
@@ -1534,38 +1649,48 @@ static int __init set_init_ctx(void)
static void destroy_buffers(void)
{
- u32 i, j;
+ union aa_buffer *aa_buf;
- for_each_possible_cpu(i) {
- for_each_cpu_buffer(j) {
- kfree(per_cpu(aa_buffers, i).buf[j]);
- per_cpu(aa_buffers, i).buf[j] = NULL;
- }
+ spin_lock(&aa_buffers_lock);
+ while (!list_empty(&aa_global_buffers)) {
+ aa_buf = list_first_entry(&aa_global_buffers, union aa_buffer,
+ list);
+ list_del(&aa_buf->list);
+ spin_unlock(&aa_buffers_lock);
+ kfree(aa_buf);
+ spin_lock(&aa_buffers_lock);
}
+ spin_unlock(&aa_buffers_lock);
}
static int __init alloc_buffers(void)
{
- u32 i, j;
-
- for_each_possible_cpu(i) {
- for_each_cpu_buffer(j) {
- char *buffer;
-
- if (cpu_to_node(i) > num_online_nodes())
- /* fallback to kmalloc for offline nodes */
- buffer = kmalloc(aa_g_path_max, GFP_KERNEL);
- else
- buffer = kmalloc_node(aa_g_path_max, GFP_KERNEL,
- cpu_to_node(i));
- if (!buffer) {
- destroy_buffers();
- return -ENOMEM;
- }
- per_cpu(aa_buffers, i).buf[j] = buffer;
+ union aa_buffer *aa_buf;
+ int i, num;
+
+ /*
+ * A function may require two buffers at once. Usually the buffers are
+ * used for a short period of time and are shared. On UP kernel buffers
+ * two should be enough, with more CPUs it is possible that more
+ * buffers will be used simultaneously. The preallocated pool may grow.
+ * This preallocation has also the side-effect that AppArmor will be
+ * disabled early at boot if aa_g_path_max is extremely high.
+ */
+ if (num_online_cpus() > 1)
+ num = 4 + RESERVE_COUNT;
+ else
+ num = 2 + RESERVE_COUNT;
+
+ for (i = 0; i < num; i++) {
+
+ aa_buf = kmalloc(aa_g_path_max, GFP_KERNEL |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!aa_buf) {
+ destroy_buffers();
+ return -ENOMEM;
}
+ aa_put_buffer(&aa_buf->buffer[0]);
}
-
return 0;
}
@@ -1730,7 +1855,7 @@ static int __init apparmor_init(void)
error = alloc_buffers();
if (error) {
AA_ERROR("Unable to allocate work buffers\n");
- goto buffers_out;
+ goto alloc_out;
}
error = set_init_ctx();
@@ -1755,7 +1880,6 @@ static int __init apparmor_init(void)
buffers_out:
destroy_buffers();
-
alloc_out:
aa_destroy_aafs();
aa_teardown_dfa_engine();
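
The lsm.c changes above replace the old per-CPU path buffers with a global pool: aa_get_buffer() hands out a preallocated buffer under a spinlock and falls back to allocation, while aa_put_buffer() returns buffers to the free list. The same get/put pattern in plain userspace C, with a pthread mutex standing in for the spinlock, might look like the simplified sketch below; it illustrates the pooling idea only and is not the kernel implementation:

/* buffer_pool.c - a minimal free-list buffer pool, loosely mirroring
 * aa_get_buffer()/aa_put_buffer().
 * Build: cc -o buffer_pool buffer_pool.c -lpthread
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096	/* stands in for aa_g_path_max */

struct pool_buf {
	struct pool_buf *next;
	char data[BUF_SIZE];
};

static struct pool_buf *free_list;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static char *pool_get(void)
{
	struct pool_buf *b;

	pthread_mutex_lock(&pool_lock);
	b = free_list;
	if (b)
		free_list = b->next;
	pthread_mutex_unlock(&pool_lock);

	if (!b)			/* pool empty: allocate a fresh buffer */
		b = malloc(sizeof(*b));
	return b ? b->data : NULL;
}

static void pool_put(char *data)
{
	struct pool_buf *b;

	if (!data)
		return;
	/* recover the containing struct, like container_of() in the kernel */
	b = (struct pool_buf *)(data - offsetof(struct pool_buf, data));
	pthread_mutex_lock(&pool_lock);
	b->next = free_list;
	free_list = b;
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	char *buf = pool_get();

	if (!buf)
		return EXIT_FAILURE;
	snprintf(buf, BUF_SIZE, "scratch space");
	printf("%s\n", buf);
	pool_put(buf);
	return 0;
}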
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 6ccd3734a841..525ce22dc0e9 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -616,8 +616,8 @@ unsigned int aa_dfa_matchn_until(struct aa_dfa *dfa, unsigned int start,
#define inc_wb_pos(wb) \
do { \
- wb->pos = (wb->pos + 1) & (wb->size - 1); \
- wb->len = (wb->len + 1) & (wb->size - 1); \
+ wb->pos = (wb->pos + 1) & (WB_HISTORY_SIZE - 1); \
+ wb->len = (wb->len + 1) & (WB_HISTORY_SIZE - 1); \
} while (0)
/* For DFAs that don't support extended tagging of states */
@@ -636,7 +636,7 @@ static bool is_loop(struct match_workbuf *wb, unsigned int state,
return true;
}
if (pos == 0)
- pos = wb->size;
+ pos = WB_HISTORY_SIZE;
pos--;
}
diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c
index 17081c8dbefa..e0828ee7a345 100644
--- a/security/apparmor/mount.c
+++ b/security/apparmor/mount.c
@@ -408,11 +408,13 @@ int aa_remount(struct aa_label *label, const struct path *path,
binary = path->dentry->d_sb->s_type->fs_flags & FS_BINARY_MOUNTDATA;
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, data, binary));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -437,11 +439,18 @@ int aa_bind_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
- get_buffers(buffer, old_buffer);
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !old_buffer)
+ goto out;
+
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, flags, NULL, false));
- put_buffers(buffer, old_buffer);
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
@@ -461,11 +470,13 @@ int aa_mount_change_type(struct aa_label *label, const struct path *path,
flags &= (MS_REC | MS_SILENT | MS_SHARED | MS_PRIVATE | MS_SLAVE |
MS_UNBINDABLE);
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, NULL, NULL, NULL,
flags, NULL, false));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -488,11 +499,17 @@ int aa_move_mount(struct aa_label *label, const struct path *path,
if (error)
return error;
- get_buffers(buffer, old_buffer);
+ buffer = aa_get_buffer(false);
+ old_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!buffer || !old_buffer)
+ goto out;
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, &old_path, old_buffer,
NULL, MS_MOVE, NULL, false));
- put_buffers(buffer, old_buffer);
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(old_buffer);
path_put(&old_path);
return error;
@@ -533,8 +550,17 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
}
}
- get_buffers(buffer, dev_buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
if (dev_path) {
+ dev_buffer = aa_get_buffer(false);
+ if (!dev_buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
error = fn_for_each_confined(label, profile,
match_mnt(profile, path, buffer, dev_path, dev_buffer,
type, flags, data, binary));
@@ -543,7 +569,10 @@ int aa_new_mount(struct aa_label *label, const char *dev_name,
match_mnt_path_str(profile, path, buffer, dev_name,
type, flags, data, binary, NULL));
}
- put_buffers(buffer, dev_buffer);
+
+out:
+ aa_put_buffer(buffer);
+ aa_put_buffer(dev_buffer);
if (dev_path)
path_put(dev_path);
@@ -591,10 +620,13 @@ int aa_umount(struct aa_label *label, struct vfsmount *mnt, int flags)
AA_BUG(!label);
AA_BUG(!mnt);
- get_buffers(buffer);
+ buffer = aa_get_buffer(false);
+ if (!buffer)
+ return -ENOMEM;
+
error = fn_for_each_confined(label, profile,
profile_umount(profile, &path, buffer));
- put_buffers(buffer);
+ aa_put_buffer(buffer);
return error;
}
@@ -667,8 +699,12 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
AA_BUG(!old_path);
AA_BUG(!new_path);
- get_buffers(old_buffer, new_buffer);
- target = fn_label_build(label, profile, GFP_ATOMIC,
+ old_buffer = aa_get_buffer(false);
+ new_buffer = aa_get_buffer(false);
+ error = -ENOMEM;
+ if (!old_buffer || !new_buffer)
+ goto out;
+ target = fn_label_build(label, profile, GFP_KERNEL,
build_pivotroot(profile, new_path, new_buffer,
old_path, old_buffer));
if (!target) {
@@ -686,7 +722,8 @@ int aa_pivotroot(struct aa_label *label, const struct path *old_path,
/* already audited error */
error = PTR_ERR(target);
out:
- put_buffers(old_buffer, new_buffer);
+ aa_put_buffer(old_buffer);
+ aa_put_buffer(new_buffer);
return error;
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index ade333074c8e..269f2f53c0b1 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -582,7 +582,7 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
{
if (profile) {
if (profile->label.flags & FLAG_IMMUTIBLE) {
- *info = "cannot replace immutible profile";
+ *info = "cannot replace immutable profile";
return -EPERM;
} else if (noreplace) {
*info = "profile already exists";
@@ -856,7 +856,7 @@ static struct aa_profile *update_to_newest_parent(struct aa_profile *new)
ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
u32 mask, struct aa_loaddata *udata)
{
- const char *ns_name, *info = NULL;
+ const char *ns_name = NULL, *info = NULL;
struct aa_ns *ns = NULL;
struct aa_load_ent *ent, *tmp;
struct aa_loaddata *rawdata_ent;
@@ -1043,6 +1043,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label,
out:
aa_put_ns(ns);
aa_put_loaddata(udata);
+ kfree(ns_name);
if (error)
return error;
@@ -1124,8 +1125,8 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
mutex_lock_nested(&ns->parent->lock, ns->level);
- __aa_remove_ns(ns);
__aa_bump_ns_revision(ns);
+ __aa_remove_ns(ns);
mutex_unlock(&ns->parent->lock);
} else {
/* remove profile */
@@ -1137,9 +1138,9 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj,
goto fail_ns_lock;
}
name = profile->base.hname;
+ __aa_bump_ns_revision(ns);
__remove_profile(profile);
__aa_labelset_update_subtree(ns);
- __aa_bump_ns_revision(ns);
mutex_unlock(&ns->lock);
}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 8cfc9493eefc..2d743c004bc4 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -16,6 +16,7 @@
#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
+#include <linux/zlib.h>
#include "include/apparmor.h"
#include "include/audit.h"
@@ -139,9 +140,11 @@ bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
if (l->size != r->size)
return false;
+ if (l->compressed_size != r->compressed_size)
+ return false;
if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
return false;
- return memcmp(l->data, r->data, r->size) == 0;
+ return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}
/*
@@ -968,11 +971,14 @@ static int verify_header(struct aa_ext *e, int required, const char **ns)
e, error);
return error;
}
- if (*ns && strcmp(*ns, name))
+ if (*ns && strcmp(*ns, name)) {
audit_iface(NULL, NULL, NULL, "invalid ns change", e,
error);
- else if (!*ns)
- *ns = name;
+ } else if (!*ns) {
+ *ns = kstrdup(name, GFP_KERNEL);
+ if (!*ns)
+ return -ENOMEM;
+ }
}
return 0;
@@ -1039,6 +1045,105 @@ struct aa_load_ent *aa_load_ent_alloc(void)
return ent;
}
+static int deflate_compress(const char *src, size_t slen, char **dst,
+ size_t *dlen)
+{
+ int error;
+ struct z_stream_s strm;
+ void *stgbuf, *dstbuf;
+ size_t stglen = deflateBound(slen);
+
+ memset(&strm, 0, sizeof(strm));
+
+ if (stglen < slen)
+ return -EFBIG;
+
+ strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
+ MAX_MEM_LEVEL),
+ GFP_KERNEL);
+ if (!strm.workspace)
+ return -ENOMEM;
+
+ error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
+ if (error != Z_OK) {
+ error = -ENOMEM;
+ goto fail_deflate_init;
+ }
+
+ stgbuf = kvzalloc(stglen, GFP_KERNEL);
+ if (!stgbuf) {
+ error = -ENOMEM;
+ goto fail_stg_alloc;
+ }
+
+ strm.next_in = src;
+ strm.avail_in = slen;
+ strm.next_out = stgbuf;
+ strm.avail_out = stglen;
+
+ error = zlib_deflate(&strm, Z_FINISH);
+ if (error != Z_STREAM_END) {
+ error = -EINVAL;
+ goto fail_deflate;
+ }
+ error = 0;
+
+ if (is_vmalloc_addr(stgbuf)) {
+ dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
+ if (dstbuf) {
+ memcpy(dstbuf, stgbuf, strm.total_out);
+ kvfree(stgbuf);
+ }
+ } else
+ /*
+ * If the staging buffer was kmalloc'd, then using krealloc is
+ * probably going to be faster. The destination buffer will
+ * always be smaller, so it's just shrunk, avoiding a memcpy
+ */
+ dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
+
+ if (!dstbuf) {
+ error = -ENOMEM;
+ goto fail_deflate;
+ }
+
+ *dst = dstbuf;
+ *dlen = strm.total_out;
+
+fail_stg_alloc:
+ zlib_deflateEnd(&strm);
+fail_deflate_init:
+ kvfree(strm.workspace);
+ return error;
+
+fail_deflate:
+ kvfree(stgbuf);
+ goto fail_stg_alloc;
+}
+
+static int compress_loaddata(struct aa_loaddata *data)
+{
+
+ AA_BUG(data->compressed_size > 0);
+
+ /*
+ * Shortcut the no compression case, else we increase the amount of
+ * storage required by a small amount
+ */
+ if (aa_g_rawdata_compression_level != 0) {
+ void *udata = data->data;
+ int error = deflate_compress(udata, data->size, &data->data,
+ &data->compressed_size);
+ if (error)
+ return error;
+
+ kvfree(udata);
+ } else
+ data->compressed_size = data->size;
+
+ return 0;
+}
+
/**
* aa_unpack - unpack packed binary profile(s) data loaded from user space
* @udata: user data copied to kmem (NOT NULL)
@@ -1107,6 +1212,9 @@ int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
goto fail;
}
}
+ error = compress_loaddata(udata);
+ if (error)
+ goto fail;
return 0;
fail_profile:
@@ -1120,3 +1228,7 @@ fail:
return error;
}
+
+#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
+#include "policy_unpack_test.c"
+#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
new file mode 100644
index 000000000000..533137f45361
--- /dev/null
+++ b/security/apparmor/policy_unpack_test.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for AppArmor's policy unpack.
+ */
+
+#include <kunit/test.h>
+
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+
+#define TEST_STRING_NAME "TEST_STRING"
+#define TEST_STRING_DATA "testing"
+#define TEST_STRING_BUF_OFFSET \
+ (3 + strlen(TEST_STRING_NAME) + 1)
+
+#define TEST_U32_NAME "U32_TEST"
+#define TEST_U32_DATA ((u32)0x01020304)
+#define TEST_NAMED_U32_BUF_OFFSET \
+ (TEST_STRING_BUF_OFFSET + 3 + strlen(TEST_STRING_DATA) + 1)
+#define TEST_U32_BUF_OFFSET \
+ (TEST_NAMED_U32_BUF_OFFSET + 3 + strlen(TEST_U32_NAME) + 1)
+
+#define TEST_U16_OFFSET (TEST_U32_BUF_OFFSET + 3)
+#define TEST_U16_DATA ((u16)(TEST_U32_DATA >> 16))
+
+#define TEST_U64_NAME "U64_TEST"
+#define TEST_U64_DATA ((u64)0x0102030405060708)
+#define TEST_NAMED_U64_BUF_OFFSET (TEST_U32_BUF_OFFSET + sizeof(u32) + 1)
+#define TEST_U64_BUF_OFFSET \
+ (TEST_NAMED_U64_BUF_OFFSET + 3 + strlen(TEST_U64_NAME) + 1)
+
+#define TEST_BLOB_NAME "BLOB_TEST"
+#define TEST_BLOB_DATA "\xde\xad\x00\xbe\xef"
+#define TEST_BLOB_DATA_SIZE (ARRAY_SIZE(TEST_BLOB_DATA))
+#define TEST_NAMED_BLOB_BUF_OFFSET (TEST_U64_BUF_OFFSET + sizeof(u64) + 1)
+#define TEST_BLOB_BUF_OFFSET \
+ (TEST_NAMED_BLOB_BUF_OFFSET + 3 + strlen(TEST_BLOB_NAME) + 1)
+
+#define TEST_ARRAY_NAME "ARRAY_TEST"
+#define TEST_ARRAY_SIZE 16
+#define TEST_NAMED_ARRAY_BUF_OFFSET \
+ (TEST_BLOB_BUF_OFFSET + 5 + TEST_BLOB_DATA_SIZE)
+#define TEST_ARRAY_BUF_OFFSET \
+ (TEST_NAMED_ARRAY_BUF_OFFSET + 3 + strlen(TEST_ARRAY_NAME) + 1)
+
+struct policy_unpack_fixture {
+ struct aa_ext *e;
+ size_t e_size;
+};
+
+struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
+ struct kunit *test, size_t buf_size)
+{
+ char *buf;
+ struct aa_ext *e;
+
+ buf = kunit_kzalloc(test, buf_size, GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, buf);
+
+ e = kunit_kmalloc(test, sizeof(*e), GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, e);
+
+ e->start = buf;
+ e->end = e->start + buf_size;
+ e->pos = e->start;
+
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_STRING_NAME) + 1;
+ strcpy(buf + 3, TEST_STRING_NAME);
+
+ buf = e->start + TEST_STRING_BUF_OFFSET;
+ *buf = AA_STRING;
+ *(buf + 1) = strlen(TEST_STRING_DATA) + 1;
+ strcpy(buf + 3, TEST_STRING_DATA);
+
+ buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_U32_NAME) + 1;
+ strcpy(buf + 3, TEST_U32_NAME);
+ *(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
+ *((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
+
+ buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_U64_NAME) + 1;
+ strcpy(buf + 3, TEST_U64_NAME);
+ *(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
+ *((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
+
+ buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_BLOB_NAME) + 1;
+ strcpy(buf + 3, TEST_BLOB_NAME);
+ *(buf + 3 + strlen(TEST_BLOB_NAME) + 1) = AA_BLOB;
+ *(buf + 3 + strlen(TEST_BLOB_NAME) + 2) = TEST_BLOB_DATA_SIZE;
+ memcpy(buf + 3 + strlen(TEST_BLOB_NAME) + 6,
+ TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE);
+
+ buf = e->start + TEST_NAMED_ARRAY_BUF_OFFSET;
+ *buf = AA_NAME;
+ *(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
+ strcpy(buf + 3, TEST_ARRAY_NAME);
+ *(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
+ *((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
+
+ return e;
+}
+
+static int policy_unpack_test_init(struct kunit *test)
+{
+ size_t e_size = TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1;
+ struct policy_unpack_fixture *puf;
+
+ puf = kunit_kmalloc(test, sizeof(*puf), GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, puf);
+
+ puf->e_size = e_size;
+ puf->e = build_aa_ext_struct(puf, test, e_size);
+
+ test->priv = puf;
+ return 0;
+}
+
+static void policy_unpack_test_inbounds_when_inbounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+
+ KUNIT_EXPECT_TRUE(test, inbounds(puf->e, 0));
+ KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size / 2));
+ KUNIT_EXPECT_TRUE(test, inbounds(puf->e, puf->e_size));
+}
+
+static void policy_unpack_test_inbounds_when_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+
+ KUNIT_EXPECT_FALSE(test, inbounds(puf->e, puf->e_size + 1));
+}
+
+static void policy_unpack_test_unpack_array_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ u16 array_size;
+
+ puf->e->pos += TEST_ARRAY_BUF_OFFSET;
+
+ array_size = unpack_array(puf->e, NULL);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
+}
+
+static void policy_unpack_test_unpack_array_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_ARRAY_NAME;
+ u16 array_size;
+
+ puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+
+ array_size = unpack_array(puf->e, name);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)TEST_ARRAY_SIZE);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16) + 1);
+}
+
+static void policy_unpack_test_unpack_array_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_ARRAY_NAME;
+ u16 array_size;
+
+ puf->e->pos += TEST_NAMED_ARRAY_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_ARRAY_BUF_OFFSET + sizeof(u16);
+
+ array_size = unpack_array(puf->e, name);
+
+ KUNIT_EXPECT_EQ(test, array_size, (u16)0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_ARRAY_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_blob_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *blob = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_BLOB_BUF_OFFSET;
+ size = unpack_blob(puf->e, &blob, NULL);
+
+ KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ KUNIT_EXPECT_TRUE(test,
+ memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
+}
+
+static void policy_unpack_test_unpack_blob_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *blob = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+ size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+ KUNIT_ASSERT_EQ(test, size, TEST_BLOB_DATA_SIZE);
+ KUNIT_EXPECT_TRUE(test,
+ memcmp(blob, TEST_BLOB_DATA, TEST_BLOB_DATA_SIZE) == 0);
+}
+
+static void policy_unpack_test_unpack_blob_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *blob = NULL;
+ void *start;
+ int size;
+
+ puf->e->pos += TEST_NAMED_BLOB_BUF_OFFSET;
+ start = puf->e->pos;
+ puf->e->end = puf->e->start + TEST_BLOB_BUF_OFFSET
+ + TEST_BLOB_DATA_SIZE - 1;
+
+ size = unpack_blob(puf->e, &blob, TEST_BLOB_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_str_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char *string = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_STRING_BUF_OFFSET;
+ size = unpack_str(puf->e, &string, NULL);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_str_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char *string = NULL;
+ size_t size;
+
+ size = unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_str_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char *string = NULL;
+ void *start = puf->e->pos;
+ int size;
+
+ puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ + strlen(TEST_STRING_DATA) - 1;
+
+ size = unpack_str(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *string = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_STRING_BUF_OFFSET;
+ size = unpack_strdup(puf->e, &string, NULL);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_FALSE(test,
+ ((uintptr_t)puf->e->start <= (uintptr_t)string)
+ && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *string = NULL;
+ size_t size;
+
+ size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, strlen(TEST_STRING_DATA) + 1);
+ KUNIT_EXPECT_FALSE(test,
+ ((uintptr_t)puf->e->start <= (uintptr_t)string)
+ && ((uintptr_t)string <= (uintptr_t)puf->e->end));
+ KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA);
+}
+
+static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ void *start = puf->e->pos;
+ char *string = NULL;
+ int size;
+
+ puf->e->end = puf->e->pos + TEST_STRING_BUF_OFFSET
+ + strlen(TEST_STRING_DATA) - 1;
+
+ size = unpack_strdup(puf->e, &string, TEST_STRING_NAME);
+
+ KUNIT_EXPECT_EQ(test, size, 0);
+ KUNIT_EXPECT_PTR_EQ(test, string, (char *)NULL);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start);
+}
+
+static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+ success = unpack_nameX(puf->e, AA_U32, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + 1);
+}
+
+static void policy_unpack_test_unpack_nameX_with_wrong_code(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+ success = unpack_nameX(puf->e, AA_BLOB, NULL);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_nameX_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U32_NAME;
+ bool success;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+ success = unpack_nameX(puf->e, AA_U32, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + 1);
+}
+
+static void policy_unpack_test_unpack_nameX_with_wrong_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ static const char name[] = "12345678";
+ bool success;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+ success = unpack_nameX(puf->e, AA_U32, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u16_chunk_basic(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *chunk = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_U16_OFFSET;
+ /*
+ * WARNING: For unit testing purposes, we're pushing puf->e->end past
+ * the end of the allocated memory. Doing anything other than comparing
+ * memory addresses is dangerous.
+ */
+ puf->e->end += TEST_U16_DATA;
+
+ size = unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_PTR_EQ(test, (void *)chunk,
+ puf->e->start + TEST_U16_OFFSET + 2);
+ KUNIT_EXPECT_EQ(test, size, (size_t)TEST_U16_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, (void *)(chunk + TEST_U16_DATA));
+}
+
+static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_1(
+ struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *chunk = NULL;
+ size_t size;
+
+ puf->e->pos = puf->e->end - 1;
+
+ size = unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_EQ(test, size, (size_t)0);
+ KUNIT_EXPECT_PTR_EQ(test, chunk, (char *)NULL);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->end - 1);
+}
+
+static void policy_unpack_test_unpack_u16_chunk_out_of_bounds_2(
+ struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ char *chunk = NULL;
+ size_t size;
+
+ puf->e->pos += TEST_U16_OFFSET;
+ /*
+ * WARNING: For unit testing purposes, we're pushing puf->e->end past
+ * the end of the allocated memory. Doing anything other than comparing
+ * memory addresses is dangerous.
+ */
+ puf->e->end = puf->e->pos + TEST_U16_DATA - 1;
+
+ size = unpack_u16_chunk(puf->e, &chunk);
+
+ KUNIT_EXPECT_EQ(test, size, (size_t)0);
+ KUNIT_EXPECT_PTR_EQ(test, chunk, (char *)NULL);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, puf->e->start + TEST_U16_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u32_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+ u32 data;
+
+ puf->e->pos += TEST_U32_BUF_OFFSET;
+
+ success = unpack_u32(puf->e, &data, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
+}
+
+static void policy_unpack_test_unpack_u32_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U32_NAME;
+ bool success;
+ u32 data;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+
+ success = unpack_u32(puf->e, &data, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U32_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32) + 1);
+}
+
+static void policy_unpack_test_unpack_u32_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U32_NAME;
+ bool success;
+ u32 data;
+
+ puf->e->pos += TEST_NAMED_U32_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_U32_BUF_OFFSET + sizeof(u32);
+
+ success = unpack_u32(puf->e, &data, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_U32_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_u64_with_null_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+ u64 data;
+
+ puf->e->pos += TEST_U64_BUF_OFFSET;
+
+ success = unpack_u64(puf->e, &data, NULL);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
+}
+
+static void policy_unpack_test_unpack_u64_with_name(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U64_NAME;
+ bool success;
+ u64 data;
+
+ puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+
+ success = unpack_u64(puf->e, &data, name);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_EQ(test, data, TEST_U64_DATA);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64) + 1);
+}
+
+static void policy_unpack_test_unpack_u64_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ const char name[] = TEST_U64_NAME;
+ bool success;
+ u64 data;
+
+ puf->e->pos += TEST_NAMED_U64_BUF_OFFSET;
+ puf->e->end = puf->e->start + TEST_U64_BUF_OFFSET + sizeof(u64);
+
+ success = unpack_u64(puf->e, &data, name);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_PTR_EQ(test, puf->e->pos,
+ puf->e->start + TEST_NAMED_U64_BUF_OFFSET);
+}
+
+static void policy_unpack_test_unpack_X_code_match(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success = unpack_X(puf->e, AA_NAME);
+
+ KUNIT_EXPECT_TRUE(test, success);
+ KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start + 1);
+}
+
+static void policy_unpack_test_unpack_X_code_mismatch(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success = unpack_X(puf->e, AA_STRING);
+
+ KUNIT_EXPECT_FALSE(test, success);
+ KUNIT_EXPECT_TRUE(test, puf->e->pos == puf->e->start);
+}
+
+static void policy_unpack_test_unpack_X_out_of_bounds(struct kunit *test)
+{
+ struct policy_unpack_fixture *puf = test->priv;
+ bool success;
+
+ puf->e->pos = puf->e->end;
+ success = unpack_X(puf->e, AA_NAME);
+
+ KUNIT_EXPECT_FALSE(test, success);
+}
+
+static struct kunit_case apparmor_policy_unpack_test_cases[] = {
+ KUNIT_CASE(policy_unpack_test_inbounds_when_inbounds),
+ KUNIT_CASE(policy_unpack_test_inbounds_when_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_array_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_array_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_array_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_blob_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_blob_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_blob_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_code),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_nameX_with_wrong_name),
+ KUNIT_CASE(policy_unpack_test_unpack_str_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_str_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_str_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_strdup_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_strdup_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_strdup_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_basic),
+ KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_1),
+ KUNIT_CASE(policy_unpack_test_unpack_u16_chunk_out_of_bounds_2),
+ KUNIT_CASE(policy_unpack_test_unpack_u32_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u32_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u32_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_u64_with_null_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u64_with_name),
+ KUNIT_CASE(policy_unpack_test_unpack_u64_out_of_bounds),
+ KUNIT_CASE(policy_unpack_test_unpack_X_code_match),
+ KUNIT_CASE(policy_unpack_test_unpack_X_code_mismatch),
+ KUNIT_CASE(policy_unpack_test_unpack_X_out_of_bounds),
+ {},
+};
+
+static struct kunit_suite apparmor_policy_unpack_test_module = {
+ .name = "apparmor_policy_unpack",
+ .init = policy_unpack_test_init,
+ .test_cases = apparmor_policy_unpack_test_cases,
+};
+
+kunit_test_suite(apparmor_policy_unpack_test_module);
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 725674f3276d..7d0f8f7431ff 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -801,8 +801,8 @@ struct cgroup_subsys devices_cgrp_subsys = {
*
* returns 0 on success, -EPERM case the operation is not permitted
*/
-int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{
struct dev_cgroup *dev_cgroup;
bool rc;
@@ -824,3 +824,14 @@ int __devcgroup_check_permission(short type, u32 major, u32 minor,
return 0;
}
+
+int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
+{
+ int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
+
+ if (rc)
+ return -EPERM;
+
+ return __devcgroup_check_permission(type, major, minor, access);
+}
+EXPORT_SYMBOL(devcgroup_check_permission);
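+
+/*
+ * Illustrative call site (not part of this patch): a character-device driver
+ * could gate device access with something like
+ *
+ *	if (devcgroup_check_permission(DEVCG_DEV_CHAR, MAJOR(dev), MINOR(dev),
+ *				       DEVCG_ACC_READ | DEVCG_ACC_WRITE))
+ *		return -EPERM;
+ *
+ * which first runs any attached BPF device-cgroup program and, if that allows
+ * the access, checks the legacy device cgroup rules via
+ * __devcgroup_check_permission().
+ */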
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index c352532b8f84..71f0177e8716 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -18,8 +18,8 @@ if INTEGRITY
config INTEGRITY_SIGNATURE
bool "Digital signature verification using multiple keyrings"
- depends on KEYS
default n
+ select KEYS
select SIGNATURE
help
This option enables digital signature verification support
@@ -72,6 +72,15 @@ config LOAD_IPL_KEYS
depends on S390
def_bool y
+config LOAD_PPC_KEYS
+ bool "Enable loading of platform and blacklisted keys for POWER"
+ depends on INTEGRITY_PLATFORM_KEYRING
+ depends on PPC_SECURE_BOOT
+ default y
+ help
+ Enable loading of keys to the .platform keyring and blacklisted
+ hashes to the .blacklist keyring for powerpc based platforms.
+
config INTEGRITY_AUDIT
bool "Enables integrity auditing support "
depends on AUDIT
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
index 19faace69644..7ee39d66cf16 100644
--- a/security/integrity/Makefile
+++ b/security/integrity/Makefile
@@ -11,11 +11,11 @@ integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o
integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
- platform_certs/load_uefi.o
+ platform_certs/load_uefi.o \
+ platform_certs/keyring_handler.o
integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
-$(obj)/load_uefi.o: KBUILD_CFLAGS += -fshort-wchar
-
-subdir-$(CONFIG_IMA) += ima
+integrity-$(CONFIG_LOAD_PPC_KEYS) += platform_certs/efi_parser.o \
+ platform_certs/load_powerpc.o \
+ platform_certs/keyring_handler.o
obj-$(CONFIG_IMA) += ima/
-subdir-$(CONFIG_EVM) += evm
obj-$(CONFIG_EVM) += evm/
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 868ade3e8970..ea1aae3d07b3 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -39,11 +39,10 @@ static const char * const keyring_name[INTEGRITY_KEYRING_MAX] = {
#define restrict_link_to_ima restrict_link_by_builtin_trusted
#endif
-int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen)
+static struct key *integrity_keyring_from_id(const unsigned int id)
{
- if (id >= INTEGRITY_KEYRING_MAX || siglen < 2)
- return -EINVAL;
+ if (id >= INTEGRITY_KEYRING_MAX)
+ return ERR_PTR(-EINVAL);
if (!keyring[id]) {
keyring[id] =
@@ -52,23 +51,49 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
int err = PTR_ERR(keyring[id]);
pr_err("no %s keyring: %d\n", keyring_name[id], err);
keyring[id] = NULL;
- return err;
+ return ERR_PTR(err);
}
}
+ return keyring[id];
+}
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ struct key *keyring;
+
+ if (siglen < 2)
+ return -EINVAL;
+
+ keyring = integrity_keyring_from_id(id);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
switch (sig[1]) {
case 1:
/* v1 API expect signature without xattr type */
- return digsig_verify(keyring[id], sig + 1, siglen - 1,
- digest, digestlen);
+ return digsig_verify(keyring, sig + 1, siglen - 1, digest,
+ digestlen);
case 2:
- return asymmetric_verify(keyring[id], sig, siglen,
- digest, digestlen);
+ return asymmetric_verify(keyring, sig, siglen, digest,
+ digestlen);
}
return -EOPNOTSUPP;
}
+int integrity_modsig_verify(const unsigned int id, const struct modsig *modsig)
+{
+ struct key *keyring;
+
+ keyring = integrity_keyring_from_id(id);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ return ima_modsig_verify(keyring, modsig);
+}
+
static int __init __integrity_init_keyring(const unsigned int id,
key_perm_t perm,
struct key_restriction *restriction)
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 2ced99dde694..711ff10fa36e 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -160,7 +160,7 @@ config IMA_APPRAISE
config IMA_ARCH_POLICY
bool "Enable loading an IMA architecture specific policy"
- depends on (KEXEC_VERIFY_SIG && IMA) || IMA_APPRAISE \
+ depends on (KEXEC_SIG && IMA) || IMA_APPRAISE \
&& INTEGRITY_ASYMMETRIC_KEYS
default n
help
@@ -233,6 +233,19 @@ config IMA_APPRAISE_BOOTPARAM
This option enables the different "ima_appraise=" modes
(eg. fix, log) from the boot command line.
+config IMA_APPRAISE_MODSIG
+ bool "Support module-style signatures for appraisal"
+ depends on IMA_APPRAISE
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select PKCS7_MESSAGE_PARSER
+ select MODULE_SIG_FORMAT
+ default n
+ help
+ Adds support for signatures appended to files. The format of the
+	  appended signature is the same as the one used for signed kernel modules.
+ The modsig keyword can be used in the IMA policy to allow a hook
+ to accept such signatures.
+
config IMA_TRUSTED_KEYRING
bool "Require all keys on the .ima keyring be signed (deprecated)"
depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
@@ -297,3 +310,15 @@ config IMA_APPRAISE_SIGNED_INIT
default n
help
This option requires user-space init to be signed.
+
+config IMA_MEASURE_ASYMMETRIC_KEYS
+ bool
+ depends on IMA
+ depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+ default y
+
+config IMA_QUEUE_EARLY_BOOT_KEYS
+ bool
+ depends on IMA_MEASURE_ASYMMETRIC_KEYS
+ depends on SYSTEM_TRUSTED_KEYRING
+ default y
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index d921dc4f9eb0..064a256f8725 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -9,5 +9,8 @@ obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_IMA_APPRAISE_MODSIG) += ima_modsig.o
ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
+obj-$(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) += ima_asymmetric_keys.o
+obj-$(CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS) += ima_queue_keys.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 011b91c79351..64317d95363e 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -60,6 +60,7 @@ struct ima_event_data {
const unsigned char *filename;
struct evm_ima_xattr_data *xattr_value;
int xattr_len;
+ const struct modsig *modsig;
const char *violation;
const void *buf;
int buf_len;
@@ -113,6 +114,8 @@ struct ima_kexec_hdr {
u64 count;
};
+extern const int read_idmap[];
+
#ifdef CONFIG_HAVE_IMA_KEXEC
void ima_load_kexec_buffer(void);
#else
@@ -149,6 +152,7 @@ int template_desc_init_fields(const char *template_fmt,
int *num_fields);
struct ima_template_desc *ima_template_desc_current(void);
struct ima_template_desc *lookup_template_desc(const char *name);
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template);
int ima_restore_measurement_entry(struct ima_template_entry *entry);
int ima_restore_measurement_list(loff_t bufsize, void *buf);
int ima_measurements_show(struct seq_file *m, void *v);
@@ -189,6 +193,7 @@ static inline unsigned long ima_hash_key(u8 *digest)
hook(KEXEC_INITRAMFS_CHECK) \
hook(POLICY_CHECK) \
hook(KEXEC_CMDLINE) \
+ hook(KEY_CHECK) \
hook(MAX_CHECK)
#define __ima_hook_enumify(ENUM) ENUM,
@@ -196,19 +201,51 @@ enum ima_hooks {
__ima_hooks(__ima_hook_enumify)
};
+extern const char *const func_tokens[];
+
+struct modsig;
+
+#ifdef CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS
+/*
+ * To track keys that need to be measured.
+ */
+struct ima_key_entry {
+ struct list_head list;
+ void *payload;
+ size_t payload_len;
+ char *keyring_name;
+};
+void ima_init_key_queue(void);
+bool ima_should_queue_key(void);
+bool ima_queue_key(struct key *keyring, const void *payload,
+ size_t payload_len);
+void ima_process_queued_keys(void);
+#else
+static inline void ima_init_key_queue(void) {}
+static inline bool ima_should_queue_key(void) { return false; }
+static inline bool ima_queue_key(struct key *keyring,
+ const void *payload,
+ size_t payload_len) { return false; }
+static inline void ima_process_queued_keys(void) {}
+#endif /* CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS */
+
/* LIM API function definitions */
int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
int mask, enum ima_hooks func, int *pcr,
- struct ima_template_desc **template_desc);
+ struct ima_template_desc **template_desc,
+ const char *keyring);
int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
int ima_collect_measurement(struct integrity_iint_cache *iint,
struct file *file, void *buf, loff_t size,
- enum hash_algo algo);
+ enum hash_algo algo, struct modsig *modsig);
void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int pcr,
+ int xattr_len, const struct modsig *modsig, int pcr,
struct ima_template_desc *template_desc);
+void process_buffer_measurement(const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr, const char *keyring);
void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
int ima_alloc_init_template(struct ima_event_data *event_data,
@@ -223,7 +260,8 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
/* IMA policy related functions */
int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
enum ima_hooks func, int mask, int flags, int *pcr,
- struct ima_template_desc **template_desc);
+ struct ima_template_desc **template_desc,
+ const char *keyring);
void ima_init_policy(void);
void ima_update_policy(void);
void ima_update_policy_flag(void);
@@ -245,11 +283,13 @@ int ima_policy_show(struct seq_file *m, void *v);
#define IMA_APPRAISE_KEXEC 0x40
#ifdef CONFIG_IMA_APPRAISE
+int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr);
int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len);
+ int xattr_len, const struct modsig *modsig);
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
@@ -260,12 +300,19 @@ int ima_read_xattr(struct dentry *dentry,
struct evm_ima_xattr_data **xattr_value);
#else
+static inline int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ return 0;
+}
+
static inline int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file,
const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len)
+ int xattr_len,
+ const struct modsig *modsig)
{
return INTEGRITY_UNKNOWN;
}
@@ -302,6 +349,51 @@ static inline int ima_read_xattr(struct dentry *dentry,
#endif /* CONFIG_IMA_APPRAISE */
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+bool ima_hook_supports_modsig(enum ima_hooks func);
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig);
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size);
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size);
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len);
+void ima_free_modsig(struct modsig *modsig);
+#else
+static inline bool ima_hook_supports_modsig(enum ima_hooks func)
+{
+ return false;
+}
+
+static inline int ima_read_modsig(enum ima_hooks func, const void *buf,
+ loff_t buf_len, struct modsig **modsig)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_collect_modsig(struct modsig *modsig, const void *buf,
+ loff_t size)
+{
+}
+
+static inline int ima_get_modsig_digest(const struct modsig *modsig,
+ enum hash_algo *algo, const u8 **digest,
+ u32 *digest_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ima_get_raw_modsig(const struct modsig *modsig,
+ const void **data, u32 *data_len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_free_modsig(struct modsig *modsig)
+{
+}
+#endif /* CONFIG_IMA_APPRAISE_MODSIG */
+
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index f614e22bf39f..f6bc00914aa5 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -45,8 +45,8 @@ int ima_alloc_init_template(struct ima_event_data *event_data,
else
template_desc = ima_template_desc_current();
- *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
- sizeof(struct ima_field_data), GFP_NOFS);
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
if (!*entry)
return -ENOMEM;
@@ -169,12 +169,13 @@ err_out:
* @func: caller identifier
* @pcr: pointer filled in if matched measure policy sets pcr=
* @template_desc: pointer filled in if matched measure policy sets template=
+ * @keyring: keyring name used to determine the action
*
* The policy is defined in terms of keypairs:
* subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
* func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK
- * | KEXEC_CMDLINE
+ * | KEXEC_CMDLINE | KEY_CHECK
* mask: contains the permission mask
* fsmagic: hex value
*
@@ -183,14 +184,15 @@ err_out:
*/
int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
int mask, enum ima_hooks func, int *pcr,
- struct ima_template_desc **template_desc)
+ struct ima_template_desc **template_desc,
+ const char *keyring)
{
int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
flags &= ima_policy_flag;
return ima_match_policy(inode, cred, secid, func, mask, flags, pcr,
- template_desc);
+ template_desc, keyring);
}
/*
@@ -205,7 +207,7 @@ int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
*/
int ima_collect_measurement(struct integrity_iint_cache *iint,
struct file *file, void *buf, loff_t size,
- enum hash_algo algo)
+ enum hash_algo algo, struct modsig *modsig)
{
const char *audit_cause = "failed";
struct inode *inode = file_inode(file);
@@ -219,6 +221,14 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
char digest[IMA_MAX_DIGEST_SIZE];
} hash;
+ /*
+ * Always collect the modsig, because IMA might have already collected
+ * the file digest without collecting the modsig in a previous
+ * measurement rule.
+ */
+ if (modsig)
+ ima_collect_modsig(modsig, buf, size);
+
if (iint->flags & IMA_COLLECTED)
goto out;
@@ -285,7 +295,7 @@ out:
void ima_store_measurement(struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len, int pcr,
+ int xattr_len, const struct modsig *modsig, int pcr,
struct ima_template_desc *template_desc)
{
static const char op[] = "add_template_measure";
@@ -297,10 +307,17 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
.file = file,
.filename = filename,
.xattr_value = xattr_value,
- .xattr_len = xattr_len };
+ .xattr_len = xattr_len,
+ .modsig = modsig };
int violation = 0;
- if (iint->measured_pcrs & (0x1 << pcr))
+ /*
+ * We still need to store the measurement in the case of MODSIG because
+ * we only have its contents to put in the list at the time of
+ * appraisal, but a file measurement from earlier might already exist in
+ * the measurement list.
+ */
+ if (iint->measured_pcrs & (0x1 << pcr) && !modsig)
return;
result = ima_alloc_init_template(&event_data, &entry, template_desc);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 89b83194d1dc..a9649b04b9f1 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -12,6 +12,7 @@
#include <linux/magic.h>
#include <linux/ima.h>
#include <linux/evm.h>
+#include <keys/system_keyring.h>
#include "ima.h"
@@ -54,7 +55,7 @@ int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
security_task_getsecid(current, &secid);
return ima_match_policy(inode, current_cred(), secid, func, mask,
- IMA_APPRAISE | IMA_HASH, NULL, NULL);
+ IMA_APPRAISE | IMA_HASH, NULL, NULL, NULL);
}
static int ima_fix_xattr(struct dentry *dentry,
@@ -200,6 +201,142 @@ int ima_read_xattr(struct dentry *dentry,
}
/*
+ * xattr_verify - verify xattr digest or signature
+ *
+ * Verify whether the hash or signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int xattr_verify(enum ima_hooks func, struct integrity_iint_cache *iint,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ enum integrity_status *status, const char **cause)
+{
+ int rc = -EINVAL, hash_start = 0;
+
+ switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ /* fall through */
+ case IMA_XATTR_DIGEST:
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /*
+ * xattr length may be longer. md5 hash in previous
+ * version occupied 20 bytes in xattr, instead of 16
+ */
+ rc = memcmp(&xattr_value->data[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
+ if (rc) {
+ *cause = "invalid-hash";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ *status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc == -EOPNOTSUPP) {
+ *status = INTEGRITY_UNKNOWN;
+ break;
+ }
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+ break;
+ default:
+ *status = INTEGRITY_UNKNOWN;
+ *cause = "unknown-ima-data";
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * modsig_verify - verify modsig signature
+ *
+ * Verify whether the signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int modsig_verify(enum ima_hooks func, const struct modsig *modsig,
+ enum integrity_status *status, const char **cause)
+{
+ int rc;
+
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_IMA, modsig);
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ modsig);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+
+ return rc;
+}
+
+/*
+ * ima_check_blacklist - determine if the binary is blacklisted.
+ *
+ * Add the hash of the blacklisted binary to the measurement list, based
+ * on policy.
+ *
+ * Returns -EPERM if the hash is blacklisted.
+ */
+int ima_check_blacklist(struct integrity_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ enum hash_algo hash_algo;
+ const u8 *digest = NULL;
+ u32 digestsize = 0;
+ int rc = 0;
+
+ if (!(iint->flags & IMA_CHECK_BLACKLIST))
+ return 0;
+
+ if (iint->flags & IMA_MODSIG_ALLOWED && modsig) {
+ ima_get_modsig_digest(modsig, &hash_algo, &digest, &digestsize);
+
+ rc = is_binary_blacklisted(digest, digestsize);
+ if ((rc == -EPERM) && (iint->flags & IMA_MEASURE))
+ process_buffer_measurement(digest, digestsize,
+ "blacklisted-hash", NONE,
+ pcr, NULL);
+ }
+
+ return rc;
+}
+
+/*
* ima_appraise_measurement - appraise file measurement
*
* Call evm_verifyxattr() to verify the integrity of 'security.ima'.
@@ -211,19 +348,22 @@ int ima_appraise_measurement(enum ima_hooks func,
struct integrity_iint_cache *iint,
struct file *file, const unsigned char *filename,
struct evm_ima_xattr_data *xattr_value,
- int xattr_len)
+ int xattr_len, const struct modsig *modsig)
{
static const char op[] = "appraise_data";
const char *cause = "unknown";
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_backing_inode(dentry);
enum integrity_status status = INTEGRITY_UNKNOWN;
- int rc = xattr_len, hash_start = 0;
+ int rc = xattr_len;
+ bool try_modsig = iint->flags & IMA_MODSIG_ALLOWED && modsig;
- if (!(inode->i_opflags & IOP_XATTR))
+ /* If not appraising a modsig, we need an xattr. */
+ if (!(inode->i_opflags & IOP_XATTR) && !try_modsig)
return INTEGRITY_UNKNOWN;
- if (rc <= 0) {
+ /* If reading the xattr failed and there's no modsig, error out. */
+ if (rc <= 0 && !try_modsig) {
if (rc && rc != -ENODATA)
goto out;
@@ -246,6 +386,10 @@ int ima_appraise_measurement(enum ima_hooks func,
case INTEGRITY_UNKNOWN:
break;
case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */
+ /* It's fine not to have xattrs when using a modsig. */
+ if (try_modsig)
+ break;
+ /* fall through */
case INTEGRITY_NOLABEL: /* No security.evm xattr. */
cause = "missing-HMAC";
goto out;
@@ -256,65 +400,18 @@ int ima_appraise_measurement(enum ima_hooks func,
WARN_ONCE(true, "Unexpected integrity status %d\n", status);
}
- switch (xattr_value->type) {
- case IMA_XATTR_DIGEST_NG:
- /* first byte contains algorithm id */
- hash_start = 1;
- /* fall through */
- case IMA_XATTR_DIGEST:
- if (iint->flags & IMA_DIGSIG_REQUIRED) {
- cause = "IMA-signature-required";
- status = INTEGRITY_FAIL;
- break;
- }
- clear_bit(IMA_DIGSIG, &iint->atomic_flags);
- if (xattr_len - sizeof(xattr_value->type) - hash_start >=
- iint->ima_hash->length)
- /* xattr length may be longer. md5 hash in previous
- version occupied 20 bytes in xattr, instead of 16
- */
- rc = memcmp(&xattr_value->data[hash_start],
- iint->ima_hash->digest,
- iint->ima_hash->length);
- else
- rc = -EINVAL;
- if (rc) {
- cause = "invalid-hash";
- status = INTEGRITY_FAIL;
- break;
- }
- status = INTEGRITY_PASS;
- break;
- case EVM_IMA_XATTR_DIGSIG:
- set_bit(IMA_DIGSIG, &iint->atomic_flags);
- rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
- (const char *)xattr_value,
- xattr_len,
- iint->ima_hash->digest,
- iint->ima_hash->length);
- if (rc == -EOPNOTSUPP) {
- status = INTEGRITY_UNKNOWN;
- break;
- }
- if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
- func == KEXEC_KERNEL_CHECK)
- rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM,
- (const char *)xattr_value,
- xattr_len,
- iint->ima_hash->digest,
- iint->ima_hash->length);
- if (rc) {
- cause = "invalid-signature";
- status = INTEGRITY_FAIL;
- } else {
- status = INTEGRITY_PASS;
- }
- break;
- default:
- status = INTEGRITY_UNKNOWN;
- cause = "unknown-ima-data";
- break;
- }
+ if (xattr_value)
+ rc = xattr_verify(func, iint, xattr_value, xattr_len, &status,
+ &cause);
+
+ /*
+ * If we have a modsig and either no imasig or the imasig's key isn't
+ * known, then try verifying the modsig.
+ */
+ if (try_modsig &&
+ (!xattr_value || xattr_value->type == IMA_XATTR_DIGEST_NG ||
+ rc == -ENOKEY))
+ rc = modsig_verify(func, modsig, &status, &cause);
out:
/*
@@ -332,7 +429,7 @@ out:
op, cause, rc, 0);
} else if (status != INTEGRITY_PASS) {
/* Fix mode, but don't replace file signatures. */
- if ((ima_appraise & IMA_APPRAISE_FIX) &&
+ if ((ima_appraise & IMA_APPRAISE_FIX) && !try_modsig &&
(!xattr_value ||
xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
if (!ima_fix_xattr(dentry, iint))
@@ -371,7 +468,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
!(iint->flags & IMA_HASH))
return;
- rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
+ rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo, NULL);
if (rc < 0)
return;
diff --git a/security/integrity/ima/ima_asymmetric_keys.c b/security/integrity/ima/ima_asymmetric_keys.c
new file mode 100644
index 000000000000..7678f0e3e84d
--- /dev/null
+++ b/security/integrity/ima/ima_asymmetric_keys.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * File: ima_asymmetric_keys.c
+ * Defines an IMA hook to measure asymmetric keys on key
+ * create or update.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <keys/asymmetric-type.h>
+#include "ima.h"
+
+/**
+ * ima_post_key_create_or_update - measure asymmetric keys
+ * @keyring: keyring to which the key is linked
+ * @key: created or updated key
+ * @payload: The data used to instantiate or update the key.
+ * @payload_len: The length of @payload.
+ * @flags: key flags
+ * @create: flag indicating whether the key was created or updated
+ *
+ * Keys can only be measured, not appraised.
+ * The payload data used to instantiate or update the key is measured.
+ */
+void ima_post_key_create_or_update(struct key *keyring, struct key *key,
+ const void *payload, size_t payload_len,
+ unsigned long flags, bool create)
+{
+ bool queued = false;
+
+ /* Only asymmetric keys are handled by this hook. */
+ if (key->type != &key_type_asymmetric)
+ return;
+
+ if (!payload || (payload_len == 0))
+ return;
+
+ if (ima_should_queue_key())
+ queued = ima_queue_key(keyring, payload, payload_len);
+
+ if (queued)
+ return;
+
+ /*
+ * keyring->description points to the name of the keyring
+ * (such as ".builtin_trusted_keys", ".ima", etc.) to
+ * which the given key is linked.
+ *
+ * The name of the keyring is passed in the "eventname"
+ * parameter to process_buffer_measurement() and is set
+ * in the "eventname" field in ima_event_data for
+ * the key measurement IMA event.
+ *
+ * The name of the keyring is also passed in the "keyring"
+ * parameter to process_buffer_measurement() to check
+ * if the IMA policy is configured to measure a key linked
+ * to the given keyring.
+ */
+ process_buffer_measurement(payload, payload_len,
+ keyring->description, KEY_CHECK, 0,
+ keyring->description);
+}
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index d4c7b8e1b083..7967a6904851 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -268,8 +268,16 @@ static int ima_calc_file_hash_atfm(struct file *file,
rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
rc = integrity_kernel_read(file, offset, rbuf[active],
rbuf_len);
- if (rc != rbuf_len)
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
+ /*
+ * Forward current rc, do not overwrite with return value
+ * from ahash_wait()
+ */
+ ahash_wait(ahash_rc, &wait);
goto out3;
+ }
if (rbuf[1] && offset) {
/* Using two buffers, and it is not the first
@@ -354,8 +362,10 @@ static int ima_calc_file_hash_tfm(struct file *file,
rc = rbuf_len;
break;
}
- if (rbuf_len == 0)
+ if (rbuf_len == 0) { /* unexpected EOF */
+ rc = -EINVAL;
break;
+ }
offset += rbuf_len;
rc = crypto_shash_update(shash, rbuf, rbuf_len);
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 5d55ade5f3b9..195cb4079b2b 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -131,5 +131,11 @@ int __init ima_init(void)
ima_init_policy();
- return ima_fs_init();
+ rc = ima_fs_init();
+ if (rc != 0)
+ return rc;
+
+ ima_init_key_queue();
+
+ return rc;
}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 584019728660..9fe949c6a530 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -202,6 +202,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
int rc = 0, action, must_appraise = 0;
int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
struct evm_ima_xattr_data *xattr_value = NULL;
+ struct modsig *modsig = NULL;
int xattr_len = 0;
bool violation_check;
enum hash_algo hash_algo;
@@ -214,7 +215,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
* Included is the appraise submask.
*/
action = ima_get_action(inode, cred, secid, mask, func, &pcr,
- &template_desc);
+ &template_desc, NULL);
violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
(ima_policy_flag & IMA_MEASURE));
if (!action && !violation_check)
@@ -302,13 +303,27 @@ static int process_measurement(struct file *file, const struct cred *cred,
}
if ((action & IMA_APPRAISE_SUBMASK) ||
- strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
+ strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) {
/* read 'security.ima' */
xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+ /*
+ * Read the appended modsig if allowed by the policy, and allow
+ * an additional measurement list entry, if needed, based on the
+ * template format and whether the file was already measured.
+ */
+ if (iint->flags & IMA_MODSIG_ALLOWED) {
+ rc = ima_read_modsig(func, buf, size, &modsig);
+
+ if (!rc && ima_template_has_modsig(template_desc) &&
+ iint->flags & IMA_MEASURED)
+ action |= IMA_MEASURE;
+ }
+ }
+
hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
- rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
+ rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig);
if (rc != 0 && rc != -EBADF && rc != -EINVAL)
goto out_locked;
@@ -317,13 +332,17 @@ static int process_measurement(struct file *file, const struct cred *cred,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
- xattr_value, xattr_len, pcr,
+ xattr_value, xattr_len, modsig, pcr,
template_desc);
if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
- inode_lock(inode);
- rc = ima_appraise_measurement(func, iint, file, pathname,
- xattr_value, xattr_len);
- inode_unlock(inode);
+ rc = ima_check_blacklist(iint, modsig, pcr);
+ if (rc != -EPERM) {
+ inode_lock(inode);
+ rc = ima_appraise_measurement(func, iint, file,
+ pathname, xattr_value,
+ xattr_len, modsig);
+ inode_unlock(inode);
+ }
if (!rc)
rc = mmap_violation_check(func, file, &pathbuf,
&pathname, filename);
@@ -339,6 +358,7 @@ out_locked:
rc = -EACCES;
mutex_unlock(&iint->mutex);
kfree(xattr_value);
+ ima_free_modsig(modsig);
out:
if (pathbuf)
__putname(pathbuf);
@@ -426,6 +446,55 @@ int ima_file_check(struct file *file, int mask)
EXPORT_SYMBOL_GPL(ima_file_check);
/**
+ * ima_file_hash - return the stored measurement if a file has been hashed and
+ * is in the iint cache.
+ * @file: pointer to the file
+ * @buf: buffer in which to store the hash
+ * @buf_size: length of the buffer
+ *
+ * On success, return the hash algorithm (as defined in the enum hash_algo).
+ * If buf is not NULL, this function also outputs the hash into buf.
+ * If the hash is larger than buf_size, then only buf_size bytes will be copied.
+ * It generally just makes sense to pass a buffer capable of holding the largest
+ * possible hash: IMA_MAX_DIGEST_SIZE.
+ * The file hash returned is based on the entire file, including the appended
+ * signature.
+ *
+ * If IMA is disabled or if no measurement is available, return -EOPNOTSUPP.
+ * If the parameters are incorrect, return -EINVAL.
+ */
+int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+{
+ struct inode *inode;
+ struct integrity_iint_cache *iint;
+ int hash_algo;
+
+ if (!file)
+ return -EINVAL;
+
+ if (!ima_policy_flag)
+ return -EOPNOTSUPP;
+
+ inode = file_inode(file);
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&iint->mutex);
+ if (buf) {
+ size_t copied_size;
+
+ copied_size = min_t(size_t, iint->ima_hash->length, buf_size);
+ memcpy(buf, iint->ima_hash->digest, copied_size);
+ }
+ hash_algo = iint->ima_hash->algo;
+ mutex_unlock(&iint->mutex);
+
+ return hash_algo;
+}
+EXPORT_SYMBOL_GPL(ima_file_hash);
+
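+/*
+ * Minimal usage sketch (illustrative only, not part of this patch), assuming
+ * the caller already holds a reference to a valid struct file:
+ *
+ *	char digest[IMA_MAX_DIGEST_SIZE];
+ *	int algo = ima_file_hash(file, digest, sizeof(digest));
+ *
+ * A non-negative return value is the enum hash_algo of the cached measurement
+ * and digest has been filled in; -EOPNOTSUPP means IMA is disabled or no
+ * measurement is cached, and -EINVAL means the parameters were invalid.
+ */
+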
+/**
* ima_post_create_tmpfile - mark newly created tmpfile as new
* @file : newly created tmpfile
*
@@ -502,7 +571,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
return 0;
}
-static const int read_idmap[READING_MAX_ID] = {
+const int read_idmap[READING_MAX_ID] = {
[READING_FIRMWARE] = FIRMWARE_CHECK,
[READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
[READING_MODULE] = MODULE_CHECK,
@@ -574,7 +643,7 @@ int ima_load_data(enum kernel_load_data_id id)
switch (id) {
case LOADING_KEXEC_IMAGE:
- if (IS_ENABLED(CONFIG_KEXEC_VERIFY_SIG)
+ if (IS_ENABLED(CONFIG_KEXEC_SIG)
&& arch_ima_get_secureboot()) {
pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
return -EACCES;
@@ -610,14 +679,15 @@ int ima_load_data(enum kernel_load_data_id id)
* @buf: pointer to the buffer that needs to be added to the log.
* @size: size of buffer(in bytes).
* @eventname: event name to be used for the buffer entry.
- * @cred: a pointer to a credentials structure for user validation.
- * @secid: the secid of the task to be validated.
+ * @func: IMA hook
+ * @pcr: pcr to extend the measurement
+ * @keyring: keyring name to determine the action to be performed
*
* Based on policy, the buffer is measured into the ima log.
*/
-static void process_buffer_measurement(const void *buf, int size,
- const char *eventname,
- const struct cred *cred, u32 secid)
+void process_buffer_measurement(const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr, const char *keyring)
{
int ret = 0;
struct ima_template_entry *entry = NULL;
@@ -626,20 +696,49 @@ static void process_buffer_measurement(const void *buf, int size,
.filename = eventname,
.buf = buf,
.buf_len = size};
- struct ima_template_desc *template_desc = NULL;
+ struct ima_template_desc *template = NULL;
struct {
struct ima_digest_data hdr;
char digest[IMA_MAX_DIGEST_SIZE];
} hash = {};
int violation = 0;
- int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
int action = 0;
+ u32 secid;
- action = ima_get_action(NULL, cred, secid, 0, KEXEC_CMDLINE, &pcr,
- &template_desc);
- if (!(action & IMA_MEASURE))
+ if (!ima_policy_flag)
return;
+ /*
+	 * Both LSM hooks and auxiliary-based buffer measurements are
+	 * based on policy. To avoid code duplication, differentiate
+	 * between the LSM hooks and auxiliary buffer measurements,
+ * retrieving the policy rule information only for the LSM hook
+ * buffer measurements.
+ */
+ if (func) {
+ security_task_getsecid(current, &secid);
+ action = ima_get_action(NULL, current_cred(), secid, 0, func,
+ &pcr, &template, keyring);
+ if (!(action & IMA_MEASURE))
+ return;
+ }
+
+ if (!pcr)
+ pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+
+ if (!template) {
+ template = lookup_template_desc("ima-buf");
+ ret = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (ret < 0) {
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), ret);
+ return;
+ }
+ }
+
iint.ima_hash = &hash.hdr;
iint.ima_hash->algo = ima_hash_algo;
iint.ima_hash->length = hash_digest_size[ima_hash_algo];
@@ -648,7 +747,7 @@ static void process_buffer_measurement(const void *buf, int size,
if (ret < 0)
goto out;
- ret = ima_alloc_init_template(&event_data, &entry, template_desc);
+ ret = ima_alloc_init_template(&event_data, &entry, template);
if (ret < 0)
goto out;
@@ -670,13 +769,9 @@ out:
*/
void ima_kexec_cmdline(const void *buf, int size)
{
- u32 secid;
-
- if (buf && size != 0) {
- security_task_getsecid(current, &secid);
+ if (buf && size != 0)
process_buffer_measurement(buf, size, "kexec-cmdline",
- current_cred(), secid);
- }
+ KEXEC_CMDLINE, 0, NULL);
}
static int __init init_ima(void)
diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
new file mode 100644
index 000000000000..d106885cc495
--- /dev/null
+++ b/security/integrity/ima/ima_modsig.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IMA support for appraising module-style appended signatures.
+ *
+ * Copyright (C) 2019 IBM Corporation
+ *
+ * Author:
+ * Thiago Jung Bauermann <bauerman@linux.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module_signature.h>
+#include <keys/asymmetric-type.h>
+#include <crypto/pkcs7.h>
+
+#include "ima.h"
+
+struct modsig {
+ struct pkcs7_message *pkcs7_msg;
+
+ enum hash_algo hash_algo;
+
+ /* This digest will go in the 'd-modsig' field of the IMA template. */
+ const u8 *digest;
+ u32 digest_size;
+
+ /*
+ * This is what will go to the measurement list if the template requires
+ * storing the signature.
+ */
+ int raw_pkcs7_len;
+ u8 raw_pkcs7[];
+};
+
+/**
+ * ima_hook_supports_modsig - can the policy allow modsig for this hook?
+ *
+ * modsig is only supported by hooks using ima_post_read_file(), because only
+ * they preload the contents of the file in a buffer. FILE_CHECK does that in
+ * some cases, but not when reached from vfs_open(). POLICY_CHECK could
+ * support it, but that is not useful in practice because the policy is a
+ * plain text file, so it is denied here.
+ */
+bool ima_hook_supports_modsig(enum ima_hooks func)
+{
+ switch (func) {
+ case KEXEC_KERNEL_CHECK:
+ case KEXEC_INITRAMFS_CHECK:
+ case MODULE_CHECK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * ima_read_modsig - Read modsig from buf.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig)
+{
+ const size_t marker_len = strlen(MODULE_SIG_STRING);
+ const struct module_signature *sig;
+ struct modsig *hdr;
+ size_t sig_len;
+ const void *p;
+ int rc;
+
+ if (buf_len <= marker_len + sizeof(*sig))
+ return -ENOENT;
+
+ p = buf + buf_len - marker_len;
+ if (memcmp(p, MODULE_SIG_STRING, marker_len))
+ return -ENOENT;
+
+ buf_len -= marker_len;
+ sig = (const struct module_signature *)(p - sizeof(*sig));
+
+ rc = mod_check_sig(sig, buf_len, func_tokens[func]);
+ if (rc)
+ return rc;
+
+ sig_len = be32_to_cpu(sig->sig_len);
+ buf_len -= sig_len + sizeof(*sig);
+
+ /* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */
+ hdr = kzalloc(sizeof(*hdr) + sig_len, GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ hdr->pkcs7_msg = pkcs7_parse_message(buf + buf_len, sig_len);
+ if (IS_ERR(hdr->pkcs7_msg)) {
+ rc = PTR_ERR(hdr->pkcs7_msg);
+ kfree(hdr);
+ return rc;
+ }
+
+ memcpy(hdr->raw_pkcs7, buf + buf_len, sig_len);
+ hdr->raw_pkcs7_len = sig_len;
+
+ /* We don't know the hash algorithm yet. */
+ hdr->hash_algo = HASH_ALGO__LAST;
+
+ *modsig = hdr;
+
+ return 0;
+}
+
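+/*
+ * Illustrative layout (not part of this patch) of a buffer carrying a
+ * module-style appended signature, as parsed by ima_read_modsig() above,
+ * from start to end:
+ *
+ *	[ original file contents          ]
+ *	[ raw PKCS#7 signature, sig_len B ]
+ *	[ struct module_signature         ]
+ *	[ MODULE_SIG_STRING marker        ]
+ *
+ * ima_read_modsig() finds the marker at the very end of the buffer and walks
+ * backwards from it to the signature metadata and the PKCS#7 blob.
+ */
+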
+/**
+ * ima_collect_modsig - Calculate the file hash without the appended signature.
+ *
+ * Since the modsig is part of the file contents, the hash used in its signature
+ * isn't the same one ordinarily calculated by IMA. Therefore the PKCS#7
+ * code calculates a separate digest for signature verification.
+ */
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size)
+{
+ int rc;
+
+ /*
+ * Provide the file contents (minus the appended sig) so that the PKCS7
+ * code can calculate the file hash.
+ */
+ size -= modsig->raw_pkcs7_len + strlen(MODULE_SIG_STRING) +
+ sizeof(struct module_signature);
+ rc = pkcs7_supply_detached_data(modsig->pkcs7_msg, buf, size);
+ if (rc)
+ return;
+
+ /* Ask the PKCS7 code to calculate the file hash. */
+ rc = pkcs7_get_digest(modsig->pkcs7_msg, &modsig->digest,
+ &modsig->digest_size, &modsig->hash_algo);
+}
+
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig)
+{
+ return verify_pkcs7_message_sig(NULL, 0, modsig->pkcs7_msg, keyring,
+ VERIFYING_MODULE_SIGNATURE, NULL, NULL);
+}
+
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size)
+{
+ *algo = modsig->hash_algo;
+ *digest = modsig->digest;
+ *digest_size = modsig->digest_size;
+
+ return 0;
+}
+
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len)
+{
+ *data = &modsig->raw_pkcs7;
+ *data_len = modsig->raw_pkcs7_len;
+
+ return 0;
+}
+
+void ima_free_modsig(struct modsig *modsig)
+{
+ if (!modsig)
+ return;
+
+ pkcs7_free_message(modsig->pkcs7_msg);
+ kfree(modsig);
+}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 6df7f641ff66..453427048999 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -6,6 +6,9 @@
* ima_policy.c
* - initialize default measure policy rules
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
@@ -31,6 +34,7 @@
#define IMA_EUID 0x0080
#define IMA_PCR 0x0100
#define IMA_FSNAME 0x0200
+#define IMA_KEYRINGS 0x0400
#define UNKNOWN 0
#define MEASURE 0x0001 /* same as IMA_MEASURE */
@@ -42,7 +46,7 @@
#define DONT_HASH 0x0200
#define INVALID_PCR(a) (((a) < 0) || \
- (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8))
+ (a) >= (sizeof_field(struct integrity_iint_cache, measured_pcrs) * 8))
int ima_policy_flag;
static int temp_ima_appraise;
@@ -76,6 +80,7 @@ struct ima_rule_entry {
int type; /* audit type */
} lsm[MAX_LSM_RULES];
char *fsname;
+ char *keyrings; /* Measure keys added to these keyrings */
struct ima_template_desc *template;
};
@@ -203,6 +208,10 @@ static LIST_HEAD(ima_policy_rules);
static LIST_HEAD(ima_temp_rules);
static struct list_head *ima_rules;
+/* Pre-allocated buffer used for matching keyrings. */
+static char *ima_keyrings;
+static size_t ima_keyrings_len;
+
static int ima_policy __initdata;
static int __init default_measure_policy_setup(char *str)
@@ -260,7 +269,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
{
struct ima_rule_entry *nentry;
- int i, result;
+ int i;
nentry = kmalloc(sizeof(*nentry), GFP_KERNEL);
if (!nentry)
@@ -271,10 +280,10 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
* lsm rules can change
*/
memcpy(nentry, entry, sizeof(*nentry));
- memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm));
+ memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));
for (i = 0; i < MAX_LSM_RULES; i++) {
- if (!entry->lsm[i].rule)
+ if (!entry->lsm[i].args_p)
continue;
nentry->lsm[i].type = entry->lsm[i].type;
@@ -283,13 +292,13 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
if (!nentry->lsm[i].args_p)
goto out_err;
- result = security_filter_rule_init(nentry->lsm[i].type,
- Audit_equal,
- nentry->lsm[i].args_p,
- &nentry->lsm[i].rule);
- if (result == -EINVAL)
- pr_warn("ima: rule for LSM \'%d\' is undefined\n",
- entry->lsm[i].type);
+ security_filter_rule_init(nentry->lsm[i].type,
+ Audit_equal,
+ nentry->lsm[i].args_p,
+ &nentry->lsm[i].rule);
+ if (!nentry->lsm[i].rule)
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ (char *)entry->lsm[i].args_p);
}
return nentry;
@@ -326,7 +335,7 @@ static void ima_lsm_update_rules(void)
list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
needs_update = 0;
for (i = 0; i < MAX_LSM_RULES; i++) {
- if (entry->lsm[i].rule) {
+ if (entry->lsm[i].args_p) {
needs_update = 1;
break;
}
@@ -336,8 +345,7 @@ static void ima_lsm_update_rules(void)
result = ima_lsm_update_rule(entry);
if (result) {
- pr_err("ima: lsm rule update error %d\n",
- result);
+ pr_err("lsm rule update error %d\n", result);
return;
}
}
@@ -354,25 +362,70 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
}
/**
- * ima_match_rules - determine whether an inode matches the measure rule.
+ * ima_match_keyring - determine whether the keyring matches the measure rule
+ * @rule: a pointer to a rule
+ * @keyring: name of the keyring to match against the measure rule
+ * @cred: a pointer to a credentials structure for user validation
+ *
+ * Returns true if keyring matches one in the rule, false otherwise.
+ */
+static bool ima_match_keyring(struct ima_rule_entry *rule,
+ const char *keyring, const struct cred *cred)
+{
+ char *next_keyring, *keyrings_ptr;
+ bool matched = false;
+
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+
+ if (!rule->keyrings)
+ return true;
+
+ if (!keyring)
+ return false;
+
+ strcpy(ima_keyrings, rule->keyrings);
+
+ /*
+ * "keyrings=" is specified in the policy in the format below:
+ * keyrings=.builtin_trusted_keys|.ima|.evm
+ */
+ keyrings_ptr = ima_keyrings;
+ while ((next_keyring = strsep(&keyrings_ptr, "|")) != NULL) {
+ if (!strcmp(next_keyring, keyring)) {
+ matched = true;
+ break;
+ }
+ }
+
+ return matched;
+}
+
+/**
+ * ima_match_rules - determine whether an inode matches the policy rule.
* @rule: a pointer to a rule
* @inode: a pointer to an inode
* @cred: a pointer to a credentials structure for user validation
* @secid: the secid of the task to be validated
* @func: LIM hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @keyring: keyring name to check in policy for KEY_CHECK func
*
* Returns true on rule match, false on failure.
*/
static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
const struct cred *cred, u32 secid,
- enum ima_hooks func, int mask)
+ enum ima_hooks func, int mask,
+ const char *keyring)
{
int i;
- if (func == KEXEC_CMDLINE) {
- if ((rule->flags & IMA_FUNC) && (rule->func == func))
+ if ((func == KEXEC_CMDLINE) || (func == KEY_CHECK)) {
+ if ((rule->flags & IMA_FUNC) && (rule->func == func)) {
+ if (func == KEY_CHECK)
+ return ima_match_keyring(rule, keyring, cred);
return true;
+ }
return false;
}
if ((rule->flags & IMA_FUNC) &&
@@ -412,9 +465,12 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
int rc = 0;
u32 osid;
- if (!rule->lsm[i].rule)
- continue;
-
+ if (!rule->lsm[i].rule) {
+ if (!rule->lsm[i].args_p)
+ continue;
+ else
+ return false;
+ }
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
@@ -476,6 +532,8 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
* @pcr: set the pcr to extend
* @template_desc: the template that should be used for this rule
+ * @keyring: the keyring name, if given, to check against the policy; it may
+ *	be NULL for any func other than KEY_CHECK.
*
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
@@ -486,18 +544,23 @@ static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
*/
int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
enum ima_hooks func, int mask, int flags, int *pcr,
- struct ima_template_desc **template_desc)
+ struct ima_template_desc **template_desc,
+ const char *keyring)
{
struct ima_rule_entry *entry;
int action = 0, actmask = flags | (flags << 1);
+ if (template_desc)
+ *template_desc = ima_template_desc_current();
+
rcu_read_lock();
list_for_each_entry_rcu(entry, ima_rules, list) {
if (!(entry->action & actmask))
continue;
- if (!ima_match_rules(entry, inode, cred, secid, func, mask))
+ if (!ima_match_rules(entry, inode, cred, secid, func, mask,
+ keyring))
continue;
action |= entry->flags & IMA_ACTION_FLAGS;
@@ -510,6 +573,7 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
action |= IMA_FAIL_UNVERIFIABLE_SIGS;
}
+
if (entry->action & IMA_DO_MASK)
actmask &= ~(entry->action | entry->action << 1);
else
@@ -520,8 +584,6 @@ int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
if (template_desc && entry->template)
*template_desc = entry->template;
- else if (template_desc)
- *template_desc = ima_template_desc_current();
if (!actmask)
break;
@@ -747,6 +809,9 @@ void ima_update_policy(void)
kfree(arch_policy_entry);
}
ima_update_policy_flag();
+
+ /* Custom IMA policy has been loaded */
+ ima_process_queued_keys();
}
/* Keep the enumeration in sync with the policy_tokens! */
@@ -760,8 +825,9 @@ enum {
Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq,
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
- Opt_appraise_type, Opt_permit_directio,
- Opt_pcr, Opt_template, Opt_err
+ Opt_appraise_type, Opt_appraise_flag,
+ Opt_permit_directio, Opt_pcr, Opt_template, Opt_keyrings,
+ Opt_err
};
static const match_table_t policy_tokens = {
@@ -793,9 +859,11 @@ static const match_table_t policy_tokens = {
{Opt_euid_lt, "euid<%s"},
{Opt_fowner_lt, "fowner<%s"},
{Opt_appraise_type, "appraise_type=%s"},
+ {Opt_appraise_flag, "appraise_flag=%s"},
{Opt_permit_directio, "permit_directio"},
{Opt_pcr, "pcr=%s"},
{Opt_template, "template=%s"},
+ {Opt_keyrings, "keyrings=%s"},
{Opt_err, NULL}
};
@@ -817,8 +885,14 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
entry->lsm[lsm_rule].args_p,
&entry->lsm[lsm_rule].rule);
if (!entry->lsm[lsm_rule].rule) {
- kfree(entry->lsm[lsm_rule].args_p);
- return -EINVAL;
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ (char *)entry->lsm[lsm_rule].args_p);
+
+ if (ima_rules == &ima_default_rules) {
+ kfree(entry->lsm[lsm_rule].args_p);
+ result = -EINVAL;
+ } else
+ result = 0;
}
return result;
@@ -843,6 +917,38 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
ima_log_string_op(ab, key, value, NULL);
}
+/*
+ * Validating the appended signature included in the measurement list requires
+ * the file hash calculated without the appended signature (i.e., the 'd-modsig'
+ * field). Therefore, notify the user if they have the 'modsig' field but not
+ * the 'd-modsig' field in the template.
+ */
+static void check_template_modsig(const struct ima_template_desc *template)
+{
+#define MSG "template with 'modsig' field also needs 'd-modsig' field\n"
+ bool has_modsig, has_dmodsig;
+ static bool checked;
+ int i;
+
+ /* We only need to notify the user once. */
+ if (checked)
+ return;
+
+ has_modsig = has_dmodsig = false;
+ for (i = 0; i < template->num_fields; i++) {
+ if (!strcmp(template->fields[i]->field_id, "modsig"))
+ has_modsig = true;
+ else if (!strcmp(template->fields[i]->field_id, "d-modsig"))
+ has_dmodsig = true;
+ }
+
+ if (has_modsig && !has_dmodsig)
+ pr_notice(MSG);
+
+ checked = true;
+#undef MSG
+}
+
static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
{
struct audit_buffer *ab;
@@ -851,6 +957,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
bool uid_token;
struct ima_template_desc *template_desc;
int result = 0;
+ size_t keyrings_len;
ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
AUDIT_INTEGRITY_POLICY_RULE);
@@ -959,6 +1066,8 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->func = POLICY_CHECK;
else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
entry->func = KEXEC_CMDLINE;
+ else if (strcmp(args[0].from, "KEY_CHECK") == 0)
+ entry->func = KEY_CHECK;
else
result = -EINVAL;
if (!result)
@@ -1011,6 +1120,44 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
result = 0;
entry->flags |= IMA_FSNAME;
break;
+ case Opt_keyrings:
+ ima_log_string(ab, "keyrings", args[0].from);
+
+ keyrings_len = strlen(args[0].from) + 1;
+
+ if ((entry->keyrings) ||
+ (entry->action != MEASURE) ||
+ (entry->func != KEY_CHECK) ||
+ (keyrings_len < 2)) {
+ result = -EINVAL;
+ break;
+ }
+
+ if (keyrings_len > ima_keyrings_len) {
+ char *tmpbuf;
+
+ tmpbuf = krealloc(ima_keyrings, keyrings_len,
+ GFP_KERNEL);
+ if (!tmpbuf) {
+ result = -ENOMEM;
+ break;
+ }
+
+ ima_keyrings = tmpbuf;
+ ima_keyrings_len = keyrings_len;
+ }
+
+ entry->keyrings = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->keyrings) {
+ kfree(ima_keyrings);
+ ima_keyrings = NULL;
+ ima_keyrings_len = 0;
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_KEYRINGS;
+ break;
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
@@ -1128,9 +1275,18 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
ima_log_string(ab, "appraise_type", args[0].from);
if ((strcmp(args[0].from, "imasig")) == 0)
entry->flags |= IMA_DIGSIG_REQUIRED;
+ else if (ima_hook_supports_modsig(entry->func) &&
+ strcmp(args[0].from, "imasig|modsig") == 0)
+ entry->flags |= IMA_DIGSIG_REQUIRED |
+ IMA_MODSIG_ALLOWED;
else
result = -EINVAL;
break;
+ case Opt_appraise_flag:
+ ima_log_string(ab, "appraise_flag", args[0].from);
+ if (strstr(args[0].from, "blacklist"))
+ entry->flags |= IMA_CHECK_BLACKLIST;
+ break;
case Opt_permit_directio:
entry->flags |= IMA_PERMIT_DIRECTIO;
break;
@@ -1181,6 +1337,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
else if (entry->action == APPRAISE)
temp_ima_appraise |= ima_appraise_flag(entry->func);
+ if (!result && entry->flags & IMA_MODSIG_ALLOWED) {
+ template_desc = entry->template ? entry->template :
+ ima_template_desc_current();
+ check_template_modsig(template_desc);
+ }
+
audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
@@ -1252,6 +1414,12 @@ void ima_delete_rules(void)
}
}
+#define __ima_hook_stringify(str) (#str),
+
+const char *const func_tokens[] = {
+ __ima_hooks(__ima_hook_stringify)
+};
+
#ifdef CONFIG_IMA_READ_POLICY
enum {
mask_exec = 0, mask_write, mask_read, mask_append
@@ -1264,12 +1432,6 @@ static const char *const mask_tokens[] = {
"^MAY_APPEND"
};
-#define __ima_hook_stringify(str) (#str),
-
-static const char *const func_tokens[] = {
- __ima_hooks(__ima_hook_stringify)
-};
-
void *ima_policy_start(struct seq_file *m, loff_t *pos)
{
loff_t l = *pos;
@@ -1371,6 +1533,13 @@ int ima_policy_show(struct seq_file *m, void *v)
seq_puts(m, " ");
}
+ if (entry->flags & IMA_KEYRINGS) {
+ if (entry->keyrings != NULL)
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->keyrings);
+ seq_printf(m, pt(Opt_keyrings), tbuf);
+ seq_puts(m, " ");
+ }
+
if (entry->flags & IMA_PCR) {
snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr);
seq_printf(m, pt(Opt_pcr), tbuf);
@@ -1443,12 +1612,19 @@ int ima_policy_show(struct seq_file *m, void *v)
(char *)entry->lsm[i].args_p);
break;
}
+ seq_puts(m, " ");
}
}
if (entry->template)
seq_printf(m, "template=%s ", entry->template->name);
- if (entry->flags & IMA_DIGSIG_REQUIRED)
- seq_puts(m, "appraise_type=imasig ");
+ if (entry->flags & IMA_DIGSIG_REQUIRED) {
+ if (entry->flags & IMA_MODSIG_ALLOWED)
+ seq_puts(m, "appraise_type=imasig|modsig ");
+ else
+ seq_puts(m, "appraise_type=imasig ");
+ }
+ if (entry->flags & IMA_CHECK_BLACKLIST)
+ seq_puts(m, "appraise_flag=check_blacklist ");
if (entry->flags & IMA_PERMIT_DIRECTIO)
seq_puts(m, "permit_directio ");
rcu_read_unlock();
@@ -1456,3 +1632,53 @@ int ima_policy_show(struct seq_file *m, void *v)
return 0;
}
#endif /* CONFIG_IMA_READ_POLICY */
+
+#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
+/*
+ * ima_appraise_signature: whether IMA will appraise a given function using
+ * an IMA digital signature. This is restricted to cases where the kernel
+ * has a set of built-in trusted keys in order to avoid an attacker simply
+ * loading additional keys.
+ */
+bool ima_appraise_signature(enum kernel_read_file_id id)
+{
+ struct ima_rule_entry *entry;
+ bool found = false;
+ enum ima_hooks func;
+
+ if (id >= READING_MAX_ID)
+ return false;
+
+ func = read_idmap[id] ?: FILE_CHECK;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, ima_rules, list) {
+ if (entry->action != APPRAISE)
+ continue;
+
+ /*
+		 * A generic entry (no func specified) matches any hook;
+		 * otherwise the rule's func must match the one we're after.
+ */
+ if (entry->func && entry->func != func)
+ continue;
+
+ /*
+ * We require this to be a digital signature, not a raw IMA
+ * hash.
+ */
+ if (entry->flags & IMA_DIGSIG_REQUIRED)
+ found = true;
+
+ /*
+ * We've found a rule that matches, so break now even if it
+		 * We've found a rule that matches, so break now even if it
+		 * didn't require a digital signature: a later rule that does
+		 * cannot override it, so it would be a false positive.
+ break;
+ }
+
+ rcu_read_unlock();
+ return found;
+}
+#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
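+
By way of illustration only (not part of the patch), policy rules exercising the options added above could look like the following; the keyring names are examples:

    measure func=KEY_CHECK keyrings=.builtin_trusted_keys|.ima
    appraise func=MODULE_CHECK appraise_type=imasig|modsig
    appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig
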
diff --git a/security/integrity/ima/ima_queue_keys.c b/security/integrity/ima/ima_queue_keys.c
new file mode 100644
index 000000000000..c87c72299191
--- /dev/null
+++ b/security/integrity/ima/ima_queue_keys.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * File: ima_queue_keys.c
+ * Enables deferred processing of keys
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/workqueue.h>
+#include <keys/asymmetric-type.h>
+#include "ima.h"
+
+/*
+ * Flag to indicate whether a key can be processed
+ * right away or should be queued for processing later.
+ */
+static bool ima_process_keys;
+
+/*
+ * To synchronize access to the list of keys that need to be measured
+ */
+static DEFINE_MUTEX(ima_keys_lock);
+static LIST_HEAD(ima_keys);
+
+/*
+ * If a custom IMA policy is never loaded, the keys queued up for
+ * measurement must eventually be freed. This delayed worker handles
+ * that case.
+ */
+static long ima_key_queue_timeout = 300000; /* 5 Minutes */
+static void ima_keys_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ima_keys_delayed_work, ima_keys_handler);
+static bool timer_expired;
+
+/*
+ * This worker function frees keys that may still be
+ * queued up in case custom IMA policy was not loaded.
+ */
+static void ima_keys_handler(struct work_struct *work)
+{
+ timer_expired = true;
+ ima_process_queued_keys();
+}
+
+/*
+ * This function sets up a worker to free queued keys in case
+ * custom IMA policy was never loaded.
+ */
+void ima_init_key_queue(void)
+{
+ schedule_delayed_work(&ima_keys_delayed_work,
+ msecs_to_jiffies(ima_key_queue_timeout));
+}
+
+static void ima_free_key_entry(struct ima_key_entry *entry)
+{
+ if (entry) {
+ kfree(entry->payload);
+ kfree(entry->keyring_name);
+ kfree(entry);
+ }
+}
+
+static struct ima_key_entry *ima_alloc_key_entry(struct key *keyring,
+ const void *payload,
+ size_t payload_len)
+{
+ int rc = 0;
+ struct ima_key_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->payload = kmemdup(payload, payload_len, GFP_KERNEL);
+ entry->keyring_name = kstrdup(keyring->description,
+ GFP_KERNEL);
+ entry->payload_len = payload_len;
+ }
+
+ if ((entry == NULL) || (entry->payload == NULL) ||
+ (entry->keyring_name == NULL)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&entry->list);
+
+out:
+ if (rc) {
+ ima_free_key_entry(entry);
+ entry = NULL;
+ }
+
+ return entry;
+}
+
+bool ima_queue_key(struct key *keyring, const void *payload,
+ size_t payload_len)
+{
+ bool queued = false;
+ struct ima_key_entry *entry;
+
+ entry = ima_alloc_key_entry(keyring, payload, payload_len);
+ if (!entry)
+ return false;
+
+ mutex_lock(&ima_keys_lock);
+ if (!ima_process_keys) {
+ list_add_tail(&entry->list, &ima_keys);
+ queued = true;
+ }
+ mutex_unlock(&ima_keys_lock);
+
+ if (!queued)
+ ima_free_key_entry(entry);
+
+ return queued;
+}
+
+/*
+ * ima_process_queued_keys() - process keys queued for measurement
+ *
+ * This function sets ima_process_keys to true and processes queued keys.
+ * From here on keys will be processed right away (not queued).
+ */
+void ima_process_queued_keys(void)
+{
+ struct ima_key_entry *entry, *tmp;
+ bool process = false;
+
+ if (ima_process_keys)
+ return;
+
+ /*
+	 * Once ima_process_keys is set to true, any new key is processed
+	 * immediately and is not queued on the ima_keys list. Only the
+	 * caller that flips ima_process_keys to true processes the keys
+	 * that were already queued.
+ */
+ mutex_lock(&ima_keys_lock);
+ if (!ima_process_keys) {
+ ima_process_keys = true;
+ process = true;
+ }
+ mutex_unlock(&ima_keys_lock);
+
+ if (!process)
+ return;
+
+ if (!timer_expired)
+ cancel_delayed_work_sync(&ima_keys_delayed_work);
+
+ list_for_each_entry_safe(entry, tmp, &ima_keys, list) {
+ if (!timer_expired)
+ process_buffer_measurement(entry->payload,
+ entry->payload_len,
+ entry->keyring_name,
+ KEY_CHECK, 0,
+ entry->keyring_name);
+ list_del(&entry->list);
+ ima_free_key_entry(entry);
+ }
+}
+
+inline bool ima_should_queue_key(void)
+{
+ return !ima_process_keys;
+}
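+
For context, here is an illustrative sketch (not part of the patch) of how a key-measurement hook would use the helpers above; the function name is hypothetical and the process_buffer_measurement() call mirrors the queued-key path in ima_process_queued_keys():

    static void example_measure_key(struct key *keyring, const void *payload,
    				    size_t plen)
    {
    	/* Defer measurement until a custom policy is loaded (or times out). */
    	if (ima_should_queue_key() && ima_queue_key(keyring, payload, plen))
    		return;

    	/* Otherwise measure immediately, keyed by the keyring name. */
    	process_buffer_measurement(payload, plen, keyring->description,
    				   KEY_CHECK, 0, keyring->description);
    }
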
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index cb349d7b2601..6aa6408603e3 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -23,6 +23,7 @@ static struct ima_template_desc builtin_templates[] = {
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
{.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
+ {.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
@@ -42,6 +43,10 @@ static const struct ima_template_field supported_fields[] = {
.field_show = ima_show_template_sig},
{.field_id = "buf", .field_init = ima_eventbuf_init,
.field_show = ima_show_template_buf},
+ {.field_id = "d-modsig", .field_init = ima_eventdigest_modsig_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "modsig", .field_init = ima_eventmodsig_init,
+ .field_show = ima_show_template_sig},
};
/*
@@ -49,10 +54,29 @@ static const struct ima_template_field supported_fields[] = {
* need to be accounted for since they shouldn't be defined in the same template
* description as 'd-ng' and 'n-ng' respectively.
*/
-#define MAX_TEMPLATE_NAME_LEN sizeof("d-ng|n-ng|sig|buf")
+#define MAX_TEMPLATE_NAME_LEN sizeof("d-ng|n-ng|sig|buf|d-modsig|modsig")
static struct ima_template_desc *ima_template;
+/**
+ * ima_template_has_modsig - Check whether template has modsig-related fields.
+ * @ima_template: IMA template to check.
+ *
+ * Tells whether the given template has fields referencing a file's appended
+ * signature.
+ */
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template)
+{
+ int i;
+
+ for (i = 0; i < ima_template->num_fields; i++)
+ if (!strcmp(ima_template->fields[i]->field_id, "modsig") ||
+ !strcmp(ima_template->fields[i]->field_id, "d-modsig"))
+ return true;
+
+ return false;
+}
+
static int __init ima_template_setup(char *str)
{
struct ima_template_desc *template_desc;
@@ -282,9 +306,8 @@ static int ima_restore_template_data(struct ima_template_desc *template_desc,
int ret = 0;
int i;
- *entry = kzalloc(sizeof(**entry) +
- template_desc->num_fields * sizeof(struct ima_field_data),
- GFP_NOFS);
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
if (!*entry)
return -ENOMEM;
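As an aside, the struct_size() conversion above is an overflow-checked form of the open-coded size it replaces; conceptually it evaluates to the following (sketch only, without the saturation on overflow that struct_size() provides):

    size = sizeof(**entry) +
           template_desc->num_fields * sizeof(struct ima_field_data);
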
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 2fb9a10bc6b7..32ae05d88257 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -225,7 +225,8 @@ int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
return 0;
}
-static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,
+ u8 hash_algo,
struct ima_field_data *field_data)
{
/*
@@ -328,6 +329,41 @@ out:
hash_algo, field_data);
}
+/*
+ * This function writes the digest of the file which is expected to match the
+ * digest contained in the file's appended signature.
+ */
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ enum hash_algo hash_algo;
+ const u8 *cur_digest;
+ u32 cur_digestsize;
+
+ if (!event_data->modsig)
+ return 0;
+
+ if (event_data->violation) {
+ /* Recording a violation. */
+ hash_algo = HASH_ALGO_SHA1;
+ cur_digest = NULL;
+ cur_digestsize = 0;
+ } else {
+ int rc;
+
+ rc = ima_get_modsig_digest(event_data->modsig, &hash_algo,
+ &cur_digest, &cur_digestsize);
+ if (rc)
+ return rc;
+ else if (hash_algo == HASH_ALGO__LAST || cur_digestsize == 0)
+ /* There was some error collecting the digest. */
+ return -EINVAL;
+ }
+
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data);
+}
+
static int ima_eventname_init_common(struct ima_event_data *event_data,
struct ima_field_data *field_data,
bool size_limit)
@@ -406,3 +442,29 @@ int ima_eventbuf_init(struct ima_event_data *event_data,
event_data->buf_len, DATA_FMT_HEX,
field_data);
}
+
+/*
+ * ima_eventmodsig_init - include the appended file signature as part of the
+ * template data
+ */
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ const void *data;
+ u32 data_len;
+ int rc;
+
+ if (!event_data->modsig)
+ return 0;
+
+ /*
+ * modsig is a runtime structure containing pointers. Get its raw data
+ * instead.
+ */
+ rc = ima_get_raw_modsig(event_data->modsig, &data, &data_len);
+ if (rc)
+ return rc;
+
+ return ima_write_template_field_data(data, data_len, DATA_FMT_HEX,
+ field_data);
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
index 652aa5de81ef..9a88c79a7a61 100644
--- a/security/integrity/ima/ima_template_lib.h
+++ b/security/integrity/ima/ima_template_lib.h
@@ -36,10 +36,14 @@ int ima_eventname_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventdigest_ng_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
int ima_eventname_ng_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventsig_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
int ima_eventbuf_init(struct ima_event_data *event_data,
struct ima_field_data *field_data);
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index ed12d8e13d04..73fc286834d7 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -31,6 +31,8 @@
#define IMA_NEW_FILE 0x04000000
#define EVM_IMMUTABLE_DIGSIG 0x08000000
#define IMA_FAIL_UNVERIFIABLE_SIGS 0x10000000
+#define IMA_MODSIG_ALLOWED 0x20000000
+#define IMA_CHECK_BLACKLIST 0x40000000
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
IMA_HASH | IMA_APPRAISE_SUBMASK)
@@ -147,10 +149,13 @@ int integrity_kernel_read(struct file *file, loff_t offset,
extern struct dentry *integrity_dir;
+struct modsig;
+
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
const char *digest, int digestlen);
+int integrity_modsig_verify(unsigned int id, const struct modsig *modsig);
int __init integrity_init_keyring(const unsigned int id);
int __init integrity_load_x509(const unsigned int id, const char *path);
@@ -165,6 +170,12 @@ static inline int integrity_digsig_verify(const unsigned int id,
return -EOPNOTSUPP;
}
+static inline int integrity_modsig_verify(unsigned int id,
+ const struct modsig *modsig)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int integrity_init_keyring(const unsigned int id)
{
return 0;
@@ -190,6 +201,16 @@ static inline int asymmetric_verify(struct key *keyring, const char *sig,
}
#endif
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig);
+#else
+static inline int ima_modsig_verify(struct key *keyring,
+ const struct modsig *modsig)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_IMA_LOAD_X509
void __init ima_load_x509(void);
#else
diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
new file mode 100644
index 000000000000..c5ba695c10e3
--- /dev/null
+++ b/security/integrity/platform_certs/keyring_handler.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include "../integrity.h"
+
+static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
+static efi_guid_t efi_cert_x509_sha256_guid __initdata =
+ EFI_CERT_X509_SHA256_GUID;
+static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
+
+/*
+ * Blacklist a hash.
+ */
+static __init void uefi_blacklist_hash(const char *source, const void *data,
+ size_t len, const char *type,
+ size_t type_len)
+{
+ char *hash, *p;
+
+ hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
+ if (!hash)
+ return;
+ p = memcpy(hash, type, type_len);
+ p += type_len;
+ bin2hex(p, data, len);
+ p += len * 2;
+ *p = 0;
+
+ mark_hash_blacklisted(hash);
+ kfree(hash);
+}
+
+/*
+ * Blacklist an X509 TBS hash.
+ */
+static __init void uefi_blacklist_x509_tbs(const char *source,
+ const void *data, size_t len)
+{
+ uefi_blacklist_hash(source, data, len, "tbs:", 4);
+}
+
+/*
+ * Blacklist the hash of an executable.
+ */
+static __init void uefi_blacklist_binary(const char *source,
+ const void *data, size_t len)
+{
+ uefi_blacklist_hash(source, data, len, "bin:", 4);
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the UEFI db and MokListRT tables.
+ */
+__init efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return add_to_platform_keyring;
+ return 0;
+}
+
+/*
+ * Return the appropriate handler for particular signature list types found in
+ * the UEFI dbx and MokListXRT tables.
+ */
+__init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
+ return uefi_blacklist_x509_tbs;
+ if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
+ return uefi_blacklist_binary;
+ return 0;
+}
diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
new file mode 100644
index 000000000000..2462bfa08fe3
--- /dev/null
+++ b/security/integrity/platform_certs/keyring_handler.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef PLATFORM_CERTS_INTERNAL_H
+#define PLATFORM_CERTS_INTERNAL_H
+
+#include <linux/efi.h>
+
+void blacklist_hash(const char *source, const void *data,
+ size_t len, const char *type,
+ size_t type_len);
+
+/*
+ * Blacklist an X509 TBS hash.
+ */
+void blacklist_x509_tbs(const char *source, const void *data, size_t len);
+
+/*
+ * Blacklist the hash of an executable.
+ */
+void blacklist_binary(const char *source, const void *data, size_t len);
+
+/*
+ * Return the handler for particular signature list types found in the db.
+ */
+efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
+
+/*
+ * Return the handler for particular signature list types found in the dbx.
+ */
+efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
+
+#endif
diff --git a/security/integrity/platform_certs/load_powerpc.c b/security/integrity/platform_certs/load_powerpc.c
new file mode 100644
index 000000000000..a2900cb85357
--- /dev/null
+++ b/security/integrity/platform_certs/load_powerpc.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 IBM Corporation
+ * Author: Nayna Jain
+ *
+ * - loads keys and hashes stored and controlled by the firmware.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <asm/secure_boot.h>
+#include <asm/secvar.h>
+#include "keyring_handler.h"
+
+/*
+ * Get a certificate list blob from the named secure variable.
+ */
+static __init void *get_cert_list(u8 *key, unsigned long keylen, uint64_t *size)
+{
+ int rc;
+ void *db;
+
+ rc = secvar_ops->get(key, keylen, NULL, size);
+ if (rc) {
+ pr_err("Couldn't get size: %d\n", rc);
+ return NULL;
+ }
+
+ db = kmalloc(*size, GFP_KERNEL);
+ if (!db)
+ return NULL;
+
+ rc = secvar_ops->get(key, keylen, db, size);
+ if (rc) {
+ kfree(db);
+ pr_err("Error reading %s var: %d\n", key, rc);
+ return NULL;
+ }
+
+ return db;
+}
+
+/*
+ * Load the certs contained in the keys databases into the platform trusted
+ * keyring and the blacklisted X.509 cert SHA256 hashes into the blacklist
+ * keyring.
+ */
+static int __init load_powerpc_certs(void)
+{
+ void *db = NULL, *dbx = NULL;
+ uint64_t dbsize = 0, dbxsize = 0;
+ int rc = 0;
+ struct device_node *node;
+
+ if (!secvar_ops)
+ return -ENODEV;
+
+ /* The following only applies for the edk2-compat backend. */
+ node = of_find_compatible_node(NULL, NULL, "ibm,edk2-compat-v1");
+ if (!node)
+ return -ENODEV;
+
+ /*
+	 * Get db and dbx. They might not exist, so it isn't an error if we
+ * can't get them.
+ */
+ db = get_cert_list("db", 3, &dbsize);
+ if (!db) {
+ pr_err("Couldn't get db list from firmware\n");
+ } else {
+ rc = parse_efi_signature_list("powerpc:db", db, dbsize,
+ get_handler_for_db);
+ if (rc)
+ pr_err("Couldn't parse db signatures: %d\n", rc);
+ kfree(db);
+ }
+
+ dbx = get_cert_list("dbx", 4, &dbxsize);
+ if (!dbx) {
+ pr_info("Couldn't get dbx list from firmware\n");
+ } else {
+ rc = parse_efi_signature_list("powerpc:dbx", dbx, dbxsize,
+ get_handler_for_dbx);
+ if (rc)
+ pr_err("Couldn't parse dbx signatures: %d\n", rc);
+ kfree(dbx);
+ }
+
+ of_node_put(node);
+
+ return rc;
+}
+late_initcall(load_powerpc_certs);
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index 81b19c52832b..111898aad56e 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -9,11 +9,7 @@
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include "../integrity.h"
-
-static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
-static efi_guid_t efi_cert_x509_sha256_guid __initdata =
- EFI_CERT_X509_SHA256_GUID;
-static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
+#include "keyring_handler.h"
/*
* Look to see if a UEFI variable called MokIgnoreDB exists and return true if
@@ -68,72 +64,6 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
}
/*
- * Blacklist a hash.
- */
-static __init void uefi_blacklist_hash(const char *source, const void *data,
- size_t len, const char *type,
- size_t type_len)
-{
- char *hash, *p;
-
- hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
- if (!hash)
- return;
- p = memcpy(hash, type, type_len);
- p += type_len;
- bin2hex(p, data, len);
- p += len * 2;
- *p = 0;
-
- mark_hash_blacklisted(hash);
- kfree(hash);
-}
-
-/*
- * Blacklist an X509 TBS hash.
- */
-static __init void uefi_blacklist_x509_tbs(const char *source,
- const void *data, size_t len)
-{
- uefi_blacklist_hash(source, data, len, "tbs:", 4);
-}
-
-/*
- * Blacklist the hash of an executable.
- */
-static __init void uefi_blacklist_binary(const char *source,
- const void *data, size_t len)
-{
- uefi_blacklist_hash(source, data, len, "bin:", 4);
-}
-
-/*
- * Return the appropriate handler for particular signature list types found in
- * the UEFI db and MokListRT tables.
- */
-static __init efi_element_handler_t get_handler_for_db(const efi_guid_t *
- sig_type)
-{
- if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
- return add_to_platform_keyring;
- return 0;
-}
-
-/*
- * Return the appropriate handler for particular signature list types found in
- * the UEFI dbx and MokListXRT tables.
- */
-static __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *
- sig_type)
-{
- if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
- return uefi_blacklist_x509_tbs;
- if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
- return uefi_blacklist_binary;
- return 0;
-}
-
-/*
* Load the certs contained in the UEFI databases into the platform trusted
* keyring and the UEFI blacklisted X.509 cert SHA256 hashes into the blacklist
* keyring.
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index dd313438fecf..47c041563d41 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -21,10 +21,6 @@ config KEYS
If you are unsure as to whether this is required, answer N.
-config KEYS_COMPAT
- def_bool y
- depends on COMPAT && KEYS
-
config KEYS_REQUEST_CACHE
bool "Enable temporary caching of the last request_key() result"
depends on KEYS
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 9cef54064f60..5f40807f05b3 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -17,7 +17,7 @@ obj-y := \
request_key_auth.o \
user_defined.o
compat-obj-$(CONFIG_KEY_DH_OPERATIONS) += compat_dh.o
-obj-$(CONFIG_KEYS_COMPAT) += compat.o $(compat-obj-y)
+obj-$(CONFIG_COMPAT) += compat.o $(compat-obj-y)
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
@@ -28,5 +28,5 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
# Key types
#
obj-$(CONFIG_BIG_KEYS) += big_key.o
-obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted-keys/
obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 9bcc404131aa..b975f8f11124 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -46,11 +46,6 @@ static long compat_keyctl_instantiate_key_iov(
/*
* The key control system call, 32-bit compatibility version for 64-bit archs
- *
- * This should only be called if the 64-bit arch uses weird pointers in 32-bit
- * mode or doesn't guarantee that the top 32-bits of the argument registers on
- * taking a 32-bit syscall are zero. If you can, you should call sys_keyctl()
- * directly.
*/
COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
u32, arg2, u32, arg3, u32, arg4, u32, arg5)
diff --git a/security/keys/internal.h b/security/keys/internal.h
index c039373488bd..ba3e2da14cef 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -264,7 +264,7 @@ extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
size_t, struct keyctl_kdf_params __user *);
extern long __keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
size_t, struct keyctl_kdf_params *);
-#ifdef CONFIG_KEYS_COMPAT
+#ifdef CONFIG_COMPAT
extern long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
char __user *buffer, size_t buflen,
struct compat_keyctl_kdf_params __user *kdf);
@@ -279,7 +279,7 @@ static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
return -EOPNOTSUPP;
}
-#ifdef CONFIG_KEYS_COMPAT
+#ifdef CONFIG_COMPAT
static inline long compat_keyctl_dh_compute(
struct keyctl_dh_params __user *params,
char __user *buffer, size_t buflen,
diff --git a/security/keys/key.c b/security/keys/key.c
index 764f4c57913e..718bf7217420 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -13,6 +13,7 @@
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
+#include <linux/ima.h>
#include <linux/err.h>
#include "internal.h"
@@ -936,6 +937,9 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
goto error_link_end;
}
+ ima_post_key_create_or_update(keyring, key, payload, plen,
+ flags, true);
+
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
@@ -965,6 +969,12 @@ error:
}
key_ref = __key_update(key_ref, &prep);
+
+ if (!IS_ERR(key_ref))
+ ima_post_key_create_or_update(keyring, key,
+ payload, plen,
+ flags, false);
+
goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 7325f382dbf4..957b9e3e1492 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type,
key = check_cached_key(&ctx);
if (key)
- return key;
+ goto error_free;
/* search all the process keyrings for a key */
rcu_read_lock();
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e73ec040e250..ecba39c93fd9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
{
struct request_key_auth *rka = dereference_key_rcu(key);
+ if (!rka)
+ return;
+
seq_puts(m, "key:");
seq_puts(m, key->description);
if (key_is_positive(key))
@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
size_t datalen;
long ret;
+ if (!rka)
+ return -EKEYREVOKED;
+
datalen = rka->callout_len;
ret = datalen;
diff --git a/security/keys/trusted-keys/Makefile b/security/keys/trusted-keys/Makefile
new file mode 100644
index 000000000000..7b73cebbb378
--- /dev/null
+++ b/security/keys/trusted-keys/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for trusted keys
+#
+
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_tpm1.o
+trusted-y += trusted_tpm2.o
diff --git a/security/keys/trusted.c b/security/keys/trusted-keys/trusted_tpm1.c
index 9a94672e7adc..d2c5ec1e040b 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -27,7 +27,7 @@
#include <linux/tpm.h>
#include <linux/tpm_command.h>
-#include <keys/trusted.h>
+#include <keys/trusted_tpm.h>
static const char hmac_alg[] = "hmac(sha1)";
static const char hash_alg[] = "sha1";
@@ -406,13 +406,10 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
if (ret != TPM_NONCE_SIZE)
return ret;
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_OSAP_SIZE);
- store32(tb, TPM_ORD_OSAP);
- store16(tb, type);
- store32(tb, handle);
- storebytes(tb, ononce, TPM_NONCE_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ tpm_buf_append_u16(tb, type);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append(tb, ononce, TPM_NONCE_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
@@ -437,10 +434,7 @@ int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
if (!chip)
return -ENODEV;
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_OIAP_SIZE);
- store32(tb, TPM_ORD_OIAP);
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OIAP);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -535,20 +529,17 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
goto out;
/* build and send the TPM request packet */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
- store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
- store32(tb, TPM_ORD_SEAL);
- store32(tb, keyhandle);
- storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
- store32(tb, pcrinfosize);
- storebytes(tb, pcrinfo, pcrinfosize);
- store32(tb, datalen);
- storebytes(tb, data, datalen);
- store32(tb, sess.handle);
- storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, td->encauth, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, pcrinfosize);
+ tpm_buf_append(tb, pcrinfo, pcrinfosize);
+ tpm_buf_append_u32(tb, datalen);
+ tpm_buf_append(tb, data, datalen);
+ tpm_buf_append_u32(tb, sess.handle);
+ tpm_buf_append(tb, td->nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, td->pubauth, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
@@ -594,7 +585,6 @@ static int tpm_unseal(struct tpm_buf *tb,
uint32_t authhandle2 = 0;
unsigned char cont = 0;
uint32_t ordinal;
- uint32_t keyhndl;
int ret;
/* sessions for unsealing key and data */
@@ -610,7 +600,6 @@ static int tpm_unseal(struct tpm_buf *tb,
}
ordinal = htonl(TPM_ORD_UNSEAL);
- keyhndl = htonl(SRKHANDLE);
ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
@@ -628,20 +617,17 @@ static int tpm_unseal(struct tpm_buf *tb,
return ret;
/* build and send TPM request packet */
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
- store32(tb, TPM_UNSEAL_SIZE + bloblen);
- store32(tb, TPM_ORD_UNSEAL);
- store32(tb, keyhandle);
- storebytes(tb, blob, bloblen);
- store32(tb, authhandle1);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
- store32(tb, authhandle2);
- storebytes(tb, nonceodd, TPM_NONCE_SIZE);
- store8(tb, cont);
- storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH2_COMMAND, TPM_ORD_UNSEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle1);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata1, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, authhandle2);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata2, SHA1_DIGEST_SIZE);
ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
@@ -670,23 +656,23 @@ static int tpm_unseal(struct tpm_buf *tb,
static int key_seal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
int ret;
- tb = kzalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
/* include migratable flag at end of sealed key */
p->key[p->key_len] = p->migratable;
- ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
+ ret = tpm_seal(&tb, o->keytype, o->keyhandle, o->keyauth,
p->key, p->key_len + 1, p->blob, &p->blob_len,
o->blobauth, o->pcrinfo, o->pcrinfo_len);
if (ret < 0)
pr_info("trusted_key: srkseal failed (%d)\n", ret);
- kzfree(tb);
+ tpm_buf_destroy(&tb);
return ret;
}
@@ -696,14 +682,14 @@ static int key_seal(struct trusted_key_payload *p,
static int key_unseal(struct trusted_key_payload *p,
struct trusted_key_options *o)
{
- struct tpm_buf *tb;
+ struct tpm_buf tb;
int ret;
- tb = kzalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
- ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+ ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
o->blobauth, p->key, &p->key_len);
if (ret < 0)
pr_info("trusted_key: srkunseal failed (%d)\n", ret);
@@ -711,7 +697,7 @@ static int key_unseal(struct trusted_key_payload *p,
/* pull migratable flag out of sealed key */
p->migratable = p->key[--p->key_len];
- kzfree(tb);
+ tpm_buf_destroy(&tb);
return ret;
}
@@ -1016,7 +1002,7 @@ static int trusted_instantiate(struct key *key,
switch (key_cmd) {
case Opt_load:
if (tpm2)
- ret = tpm_unseal_trusted(chip, payload, options);
+ ret = tpm2_unseal_trusted(chip, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
@@ -1032,7 +1018,7 @@ static int trusted_instantiate(struct key *key,
goto out;
}
if (tpm2)
- ret = tpm_seal_trusted(chip, payload, options);
+ ret = tpm2_seal_trusted(chip, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
@@ -1228,23 +1214,15 @@ hashalg_fail:
static int __init init_digests(void)
{
- u8 digest[TPM_MAX_DIGEST_SIZE];
- int ret;
int i;
- ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
- if (ret < 0)
- return ret;
- if (ret < TPM_MAX_DIGEST_SIZE)
- return -EFAULT;
-
digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
GFP_KERNEL);
if (!digests)
return -ENOMEM;
for (i = 0; i < chip->nr_allocated_banks; i++)
- memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
+ digests[i].alg_id = chip->allocated_banks[i].alg_id;
return 0;
}
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
new file mode 100644
index 000000000000..08ec7f48f01d
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ */
+
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted-type.h>
+#include <keys/trusted_tpm.h>
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+/**
+ * tpm_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+ const u8 *nonce, u16 nonce_len,
+ u8 attributes,
+ const u8 *hmac, u16 hmac_len)
+{
+ tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+ tpm_buf_append_u32(buf, session_handle);
+ tpm_buf_append_u16(buf, nonce_len);
+
+ if (nonce && nonce_len)
+ tpm_buf_append(buf, nonce, nonce_len);
+
+ tpm_buf_append_u8(buf, attributes);
+ tpm_buf_append_u16(buf, hmac_len);
+
+ if (hmac && hmac_len)
+ tpm_buf_append(buf, hmac, hmac_len);
+}
+
+/**
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ unsigned int blob_len;
+ struct tpm_buf buf;
+ u32 hash;
+ int i;
+ int rc;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ if (options->hash == tpm2_hash_map[i].crypto_id) {
+ hash = tpm2_hash_map[i].tpm_id;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ /* sensitive */
+ tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
+
+ tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
+ tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
+ tpm_buf_append_u16(&buf, payload->key_len + 1);
+ tpm_buf_append(&buf, payload->key, payload->key_len);
+ tpm_buf_append_u8(&buf, payload->migratable);
+
+ /* public */
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, hash);
+
+ /* policy */
+ if (options->policydigest_len) {
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
+ tpm_buf_append(&buf, options->policydigest,
+ options->policydigest_len);
+ } else {
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ tpm_buf_append_u16(&buf, 0);
+ }
+
+ /* public parameters */
+ tpm_buf_append_u16(&buf, TPM_ALG_NULL);
+ tpm_buf_append_u16(&buf, 0);
+
+ /* outside info */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR */
+ tpm_buf_append_u32(&buf, 0);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (rc)
+ goto out;
+
+ blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ if (blob_len > MAX_BLOB_SIZE) {
+ rc = -E2BIG;
+ goto out;
+ }
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
+ payload->blob_len = blob_len;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0) {
+ if (tpm2_rc_value(rc) == TPM2_RC_HASH)
+ rc = -EINVAL;
+ else
+ rc = -EPERM;
+ }
+
+ return rc;
+}
+
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ *
+ * Return: 0 on success.
+ * -E2BIG on wrong payload size.
+ * -EPERM on tpm error status.
+ * < 0 error from tpm_send.
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle)
+{
+ struct tpm_buf buf;
+ unsigned int private_len;
+ unsigned int public_len;
+ unsigned int blob_len;
+ int rc;
+
+ private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
+ if (private_len > (payload->blob_len - 2))
+ return -E2BIG;
+
+ public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+ blob_len = private_len + public_len + 4;
+ if (blob_len > payload->blob_len)
+ return -E2BIG;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ tpm_buf_append(&buf, payload->blob, blob_len);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (!rc)
+ *blob_handle = be32_to_cpup(
+ (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: blob handle
+ *
+ * Return: 0 on success
+ * -EPERM on tpm error status
+ * < 0 error from tpm_send
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle)
+{
+ struct tpm_buf buf;
+ u16 data_len;
+ u8 *data;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, blob_handle);
+ tpm2_buf_append_auth(&buf,
+ options->policyhandle ?
+ options->policyhandle : TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ TPM2_SA_CONTINUE_SESSION,
+ options->blobauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ rc = tpm_send(chip, buf.data, tpm_buf_length(&buf));
+ if (rc > 0)
+ rc = -EPERM;
+
+ if (!rc) {
+ data_len = be16_to_cpup(
+ (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+ data = &buf.data[TPM_HEADER_SIZE + 6];
+
+ memcpy(payload->key, data, data_len - 1);
+ payload->key_len = data_len - 1;
+ payload->migratable = data[data_len - 1];
+ }
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: Same as with tpm_send.
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ u32 blob_handle;
+ int rc;
+
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
+ if (rc)
+ return rc;
+
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+ tpm2_flush_context(chip, blob_handle);
+
+ return rc;
+}
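+
For reference, the fixed value 9 that tpm2_buf_append_auth() adds to the authorization-area length above is the size of the non-variable TPMS_AUTH_COMMAND fields; the breakdown below is illustrative only:

    4 bytes   session handle
    2 bytes   nonce length          (plus nonce_len bytes of nonce)
    1 byte    session attributes
    2 bytes   HMAC/password length  (plus hmac_len bytes of HMAC or password)
    giving 9 + nonce_len + hmac_len bytes appended in total.
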
diff --git a/security/lockdown/Kconfig b/security/lockdown/Kconfig
new file mode 100644
index 000000000000..e84ddf484010
--- /dev/null
+++ b/security/lockdown/Kconfig
@@ -0,0 +1,47 @@
+config SECURITY_LOCKDOWN_LSM
+ bool "Basic module for enforcing kernel lockdown"
+ depends on SECURITY
+ select MODULE_SIG if MODULES
+ help
+ Build support for an LSM that enforces a coarse kernel lockdown
+ behaviour.
+
+config SECURITY_LOCKDOWN_LSM_EARLY
+ bool "Enable lockdown LSM early in init"
+ depends on SECURITY_LOCKDOWN_LSM
+ help
+ Enable the lockdown LSM early in boot. This is necessary in order
+ to ensure that lockdown enforcement can be carried out on kernel
+ boot parameters that are otherwise parsed before the security
+ subsystem is fully initialised. If enabled, lockdown will
+ unconditionally be called before any other LSMs.
+
+choice
+ prompt "Kernel default lockdown mode"
+ default LOCK_DOWN_KERNEL_FORCE_NONE
+ depends on SECURITY_LOCKDOWN_LSM
+ help
+ The kernel can be configured to default to differing levels of
+ lockdown.
+
+config LOCK_DOWN_KERNEL_FORCE_NONE
+ bool "None"
+ help
+ No lockdown functionality is enabled by default. Lockdown may be
+ enabled via the kernel commandline or /sys/kernel/security/lockdown.
+
+config LOCK_DOWN_KERNEL_FORCE_INTEGRITY
+ bool "Integrity"
+ help
+ The kernel runs in integrity mode by default. Features that allow
+ the kernel to be modified at runtime are disabled.
+
+config LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY
+ bool "Confidentiality"
+ help
+ The kernel runs in confidentiality mode by default. Features that
+ allow the kernel to be modified at runtime or that permit userland
+ code to read confidential material held inside the kernel are
+ disabled.
+
+endchoice
diff --git a/security/lockdown/Makefile b/security/lockdown/Makefile
new file mode 100644
index 000000000000..e3634b9017e7
--- /dev/null
+++ b/security/lockdown/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown.o
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
new file mode 100644
index 000000000000..5a952617a0eb
--- /dev/null
+++ b/security/lockdown/lockdown.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Lock down the kernel
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/security.h>
+#include <linux/export.h>
+#include <linux/lsm_hooks.h>
+
+static enum lockdown_reason kernel_locked_down;
+
+static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE,
+ LOCKDOWN_INTEGRITY_MAX,
+ LOCKDOWN_CONFIDENTIALITY_MAX};
+
+/*
+ * Put the kernel into lock-down mode.
+ */
+static int lock_kernel_down(const char *where, enum lockdown_reason level)
+{
+ if (kernel_locked_down >= level)
+ return -EPERM;
+
+ kernel_locked_down = level;
+ pr_notice("Kernel is locked down from %s; see man kernel_lockdown.7\n",
+ where);
+ return 0;
+}
+
+static int __init lockdown_param(char *level)
+{
+ if (!level)
+ return -EINVAL;
+
+ if (strcmp(level, "integrity") == 0)
+ lock_kernel_down("command line", LOCKDOWN_INTEGRITY_MAX);
+ else if (strcmp(level, "confidentiality") == 0)
+ lock_kernel_down("command line", LOCKDOWN_CONFIDENTIALITY_MAX);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+early_param("lockdown", lockdown_param);
+
+/**
+ * lockdown_is_locked_down - Find out if the kernel is locked down
+ * @what: Tag to use in notice generated if lockdown is in effect
+ */
+static int lockdown_is_locked_down(enum lockdown_reason what)
+{
+ if (WARN(what >= LOCKDOWN_CONFIDENTIALITY_MAX,
+ "Invalid lockdown reason"))
+ return -EPERM;
+
+ if (kernel_locked_down >= what) {
+ if (lockdown_reasons[what])
+ pr_notice("Lockdown: %s: %s is restricted; see man kernel_lockdown.7\n",
+ current->comm, lockdown_reasons[what]);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static struct security_hook_list lockdown_hooks[] __lsm_ro_after_init = {
+ LSM_HOOK_INIT(locked_down, lockdown_is_locked_down),
+};
+
+static int __init lockdown_lsm_init(void)
+{
+#if defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY)
+ lock_kernel_down("Kernel configuration", LOCKDOWN_INTEGRITY_MAX);
+#elif defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY)
+ lock_kernel_down("Kernel configuration", LOCKDOWN_CONFIDENTIALITY_MAX);
+#endif
+ security_add_hooks(lockdown_hooks, ARRAY_SIZE(lockdown_hooks),
+ "lockdown");
+ return 0;
+}
+
+static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ char temp[80];
+ int i, offset = 0;
+
+ for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
+ enum lockdown_reason level = lockdown_levels[i];
+
+ if (lockdown_reasons[level]) {
+ const char *label = lockdown_reasons[level];
+
+ if (kernel_locked_down == level)
+ offset += sprintf(temp+offset, "[%s] ", label);
+ else
+ offset += sprintf(temp+offset, "%s ", label);
+ }
+ }
+
+ /* Convert the last space to a newline if needed. */
+ if (offset > 0)
+ temp[offset-1] = '\n';
+
+ return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+}
+
+static ssize_t lockdown_write(struct file *file, const char __user *buf,
+ size_t n, loff_t *ppos)
+{
+ char *state;
+ int i, len, err = -EINVAL;
+
+ state = memdup_user_nul(buf, n);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ len = strlen(state);
+ if (len && state[len-1] == '\n') {
+ state[len-1] = '\0';
+ len--;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) {
+ enum lockdown_reason level = lockdown_levels[i];
+ const char *label = lockdown_reasons[level];
+
+ if (label && !strcmp(state, label))
+ err = lock_kernel_down("securityfs", level);
+ }
+
+ kfree(state);
+ return err ? err : n;
+}
+
+static const struct file_operations lockdown_ops = {
+ .read = lockdown_read,
+ .write = lockdown_write,
+};
+
+static int __init lockdown_secfs_init(void)
+{
+ struct dentry *dentry;
+
+ dentry = securityfs_create_file("lockdown", 0600, NULL, NULL,
+ &lockdown_ops);
+ return PTR_ERR_OR_ZERO(dentry);
+}
+
+core_initcall(lockdown_secfs_init);
+
+#ifdef CONFIG_SECURITY_LOCKDOWN_LSM_EARLY
+DEFINE_EARLY_LSM(lockdown) = {
+#else
+DEFINE_LSM(lockdown) = {
+#endif
+ .name = "lockdown",
+ .init = lockdown_lsm_init,
+};
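From userspace, the securityfs file implemented above reads as the list of levels with the active one bracketed, and writes may only raise the level because lock_kernel_down() rejects anything at or below the current setting. A small sketch of the expected interaction (assumed usage, not from the patch):

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/security/lockdown", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "none [integrity] confidentiality" */
	fclose(f);
	return 0;
}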
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index e40874373f2b..2d2bf49016f4 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -27,6 +27,7 @@
#include <linux/dccp.h>
#include <linux/sctp.h>
#include <linux/lsm_audit.h>
+#include <linux/security.h>
/**
* ipv4_skb_to_auditdata : fill auditdata from skb
@@ -425,6 +426,10 @@ static void dump_common_audit_data(struct audit_buffer *ab,
a->u.ibendport->dev_name,
a->u.ibendport->port);
break;
+ case LSM_AUDIT_DATA_LOCKDOWN:
+ audit_log_format(ab, " lockdown_reason=");
+ audit_log_string(ab, lockdown_reasons[a->u.reason]);
+ break;
} /* switch (a->type) */
}
diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c
index d568e17dd773..f8bc574cea9c 100644
--- a/security/safesetid/securityfs.c
+++ b/security/safesetid/securityfs.c
@@ -179,15 +179,16 @@ out_free_rule:
* doesn't currently exist, just use a spinlock for now.
*/
mutex_lock(&policy_update_lock);
- rcu_swap_protected(safesetid_setuid_rules, pol,
- lockdep_is_held(&policy_update_lock));
+ pol = rcu_replace_pointer(safesetid_setuid_rules, pol,
+ lockdep_is_held(&policy_update_lock));
mutex_unlock(&policy_update_lock);
err = len;
out_free_buf:
kfree(buf);
out_free_pol:
- release_ruleset(pol);
+ if (pol)
+ release_ruleset(pol);
return err;
}
diff --git a/security/security.c b/security/security.c
index 250ee2d76406..565bc9b67276 100644
--- a/security/security.c
+++ b/security/security.c
@@ -34,6 +34,39 @@
/* How many LSMs were built into the kernel? */
#define LSM_COUNT (__end_lsm_info - __start_lsm_info)
+/*
+ * These are descriptions of the reasons that can be passed to the
+ * security_locked_down() LSM hook. Placing this array here allows
+ * all security modules to use the same descriptions for auditing
+ * purposes.
+ */
+const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+ [LOCKDOWN_NONE] = "none",
+ [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
+ [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+ [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
+ [LOCKDOWN_KEXEC] = "kexec of unsigned images",
+ [LOCKDOWN_HIBERNATION] = "hibernation",
+ [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
+ [LOCKDOWN_IOPORT] = "raw io port access",
+ [LOCKDOWN_MSR] = "raw MSR access",
+ [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
+ [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
+ [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
+ [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
+ [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
+ [LOCKDOWN_DEBUGFS] = "debugfs access",
+ [LOCKDOWN_XMON_WR] = "xmon write access",
+ [LOCKDOWN_INTEGRITY_MAX] = "integrity",
+ [LOCKDOWN_KCORE] = "/proc/kcore access",
+ [LOCKDOWN_KPROBES] = "use of kprobes",
+ [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+ [LOCKDOWN_PERF] = "unsafe use of perf",
+ [LOCKDOWN_TRACEFS] = "use of tracefs",
+ [LOCKDOWN_XMON_RW] = "xmon read and write access",
+ [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
+};
+
struct security_hook_heads security_hook_heads __lsm_ro_after_init;
static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
@@ -277,6 +310,8 @@ static void __init ordered_lsm_parse(const char *order, const char *origin)
static void __init lsm_early_cred(struct cred *cred);
static void __init lsm_early_task(struct task_struct *task);
+static int lsm_append(const char *new, char **result);
+
static void __init ordered_lsm_init(void)
{
struct lsm_info **lsm;
@@ -323,6 +358,26 @@ static void __init ordered_lsm_init(void)
kfree(ordered_lsms);
}
+int __init early_security_init(void)
+{
+ int i;
+ struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
+ struct lsm_info *lsm;
+
+ for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
+ i++)
+ INIT_HLIST_HEAD(&list[i]);
+
+ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
+ if (!lsm->enabled)
+ lsm->enabled = &lsm_enabled_true;
+ prepare_lsm(lsm);
+ initialize_lsm(lsm);
+ }
+
+ return 0;
+}
+
/**
* security_init - initializes the security framework
*
@@ -330,14 +385,18 @@ static void __init ordered_lsm_init(void)
*/
int __init security_init(void)
{
- int i;
- struct hlist_head *list = (struct hlist_head *) &security_hook_heads;
+ struct lsm_info *lsm;
pr_info("Security Framework initializing\n");
- for (i = 0; i < sizeof(security_hook_heads) / sizeof(struct hlist_head);
- i++)
- INIT_HLIST_HEAD(&list[i]);
+ /*
+ * Append the names of the early LSM modules now that kmalloc() is
+ * available
+ */
+ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
+ if (lsm->enabled)
+ lsm_append(lsm->name, &lsm_names);
+ }
/* Load LSMs in specified order. */
ordered_lsm_init();
@@ -384,7 +443,7 @@ static bool match_last_lsm(const char *list, const char *lsm)
return !strcmp(last, lsm);
}
-static int lsm_append(char *new, char **result)
+static int lsm_append(const char *new, char **result)
{
char *cp;
@@ -422,8 +481,15 @@ void __init security_add_hooks(struct security_hook_list *hooks, int count,
hooks[i].lsm = lsm;
hlist_add_tail_rcu(&hooks[i].list, hooks[i].head);
}
- if (lsm_append(lsm, &lsm_names) < 0)
- panic("%s - Cannot get early memory.\n", __func__);
+
+ /*
+ * Don't try to append during early_security_init(), we'll come back
+ * and fix this up afterwards.
+ */
+ if (slab_is_available()) {
+ if (lsm_append(lsm, &lsm_names) < 0)
+ panic("%s - Cannot get early memory.\n", __func__);
+ }
}
int call_blocking_lsm_notifier(enum lsm_event event, void *data)
@@ -870,6 +936,12 @@ int security_move_mount(const struct path *from_path, const struct path *to_path
return call_int_hook(move_mount, 0, from_path, to_path);
}
+int security_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ return call_int_hook(path_notify, 0, path, mask, obj_type);
+}
+
int security_inode_alloc(struct inode *inode)
{
int rc = lsm_inode_alloc(inode);
@@ -2358,3 +2430,36 @@ void security_bpf_prog_free(struct bpf_prog_aux *aux)
call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */
+
+int security_locked_down(enum lockdown_reason what)
+{
+ return call_int_hook(locked_down, 0, what);
+}
+EXPORT_SYMBOL(security_locked_down);
+
+#ifdef CONFIG_PERF_EVENTS
+int security_perf_event_open(struct perf_event_attr *attr, int type)
+{
+ return call_int_hook(perf_event_open, 0, attr, type);
+}
+
+int security_perf_event_alloc(struct perf_event *event)
+{
+ return call_int_hook(perf_event_alloc, 0, event);
+}
+
+void security_perf_event_free(struct perf_event *event)
+{
+ call_void_hook(perf_event_free, event);
+}
+
+int security_perf_event_read(struct perf_event *event)
+{
+ return call_int_hook(perf_event_read, 0, event);
+}
+
+int security_perf_event_write(struct perf_event *event)
+{
+ return call_int_hook(perf_event_write, 0, event);
+}
+#endif /* CONFIG_PERF_EVENTS */
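For orientation, a rough sketch of how the perf core is expected to call the new perf_event hooks when an event is opened; the PERF_SECURITY_* selectors are the ones used by the SELinux hook later in this diff, and the wrapper name is invented:

#include <linux/security.h>
#include <linux/perf_event.h>

/* Hypothetical call site mirroring how perf_event_open(2) would consult the hook. */
static int example_perf_open_check(struct perf_event_attr *attr, bool kernel_tracing)
{
	int type = kernel_tracing ? PERF_SECURITY_KERNEL : PERF_SECURITY_OPEN;

	return security_perf_event_open(attr, type);
}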
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index 5711689deb6a..1014cb0ee956 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -42,6 +42,9 @@ config SECURITY_SELINUX_DISABLE
using the selinux=0 boot parameter instead of enabling this
option.
+ WARNING: this option is deprecated and will be removed in a future
+ kernel release.
+
If you are unsure how to answer this question, answer N.
config SECURITY_SELINUX_DEVELOP
@@ -55,7 +58,8 @@ config SECURITY_SELINUX_DEVELOP
kernel will start in permissive mode (log everything, deny nothing)
unless you specify enforcing=1 on the kernel command line. You
can interactively toggle the kernel between enforcing mode and
- permissive mode (if permitted by the policy) via /selinux/enforce.
+ permissive mode (if permitted by the policy) via
+ /sys/fs/selinux/enforce.
config SECURITY_SELINUX_AVC_STATS
bool "NSA SELinux AVC Statistics"
@@ -63,7 +67,7 @@ config SECURITY_SELINUX_AVC_STATS
default y
help
This option collects access vector cache statistics to
- /selinux/avc/cache_stats, which may be monitored via
+ /sys/fs/selinux/avc/cache_stats, which may be monitored via
tools such as avcstat.
config SECURITY_SELINUX_CHECKREQPROT_VALUE
@@ -82,6 +86,29 @@ config SECURITY_SELINUX_CHECKREQPROT_VALUE
default to checking the protection requested by the application.
The checkreqprot flag may be changed from the default via the
'checkreqprot=' boot parameter. It may also be changed at runtime
- via /selinux/checkreqprot if authorized by policy.
+ via /sys/fs/selinux/checkreqprot if authorized by policy.
If you are unsure how to answer this question, answer 0.
+
+config SECURITY_SELINUX_SIDTAB_HASH_BITS
+ int "NSA SELinux sidtab hashtable size"
+ depends on SECURITY_SELINUX
+ range 8 13
+ default 9
+ help
+ This option sets the number of buckets used in the sidtab hashtable
+ to 2^SECURITY_SELINUX_SIDTAB_HASH_BITS buckets. The number of hash
+ collisions may be viewed at /sys/fs/selinux/ss/sidtab_hash_stats. If
+ chain lengths are high (e.g. > 20), then selecting a higher value here
+ will ensure that lookup times are short and stable.
+
+config SECURITY_SELINUX_SID2STR_CACHE_SIZE
+ int "NSA SELinux SID to context string translation cache size"
+ depends on SECURITY_SELINUX
+ default 256
+ help
+ This option defines the size of the internal SID -> context string
+ cache, which improves the performance of context to string
+ conversion. Setting this option to 0 disables the cache completely.
+
+ If unsure, keep the default value.
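For a sense of scale, the sidtab option translates into 2^bits hash buckets, so the default of 9 gives 512 buckets and the maximum of 13 gives 8192. A sketch of how the knob is presumably consumed (macro names here are illustrative, not taken from the patch):

/* Illustrative only; the real definitions live in the sidtab implementation. */
#define EXAMPLE_SIDTAB_HASH_BITS	CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
#define EXAMPLE_SIDTAB_HASH_BUCKETS	(1 << EXAMPLE_SIDTAB_HASH_BITS)	/* 512 with the default of 9 */
#define EXAMPLE_SIDTAB_HASH_MASK	(EXAMPLE_SIDTAB_HASH_BUCKETS - 1)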
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index ccf950409384..2000f95fb197 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
- netnode.o netport.o ibpkey.o \
+ netnode.o netport.o \
ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
@@ -14,6 +14,8 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
selinux-$(CONFIG_NETLABEL) += netlabel.o
+selinux-$(CONFIG_SECURITY_INFINIBAND) += ibpkey.o
+
ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index ecd3829996aa..d18cb32a242a 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -424,7 +424,7 @@ static inline int avc_xperms_audit(struct selinux_state *state,
if (likely(!audited))
return 0;
return slow_avc_audit(state, ssid, tsid, tclass, requested,
- audited, denied, result, ad, 0);
+ audited, denied, result, ad);
}
static void avc_node_free(struct rcu_head *rhead)
@@ -617,40 +617,37 @@ static struct avc_node *avc_insert(struct selinux_avc *avc,
struct avc_node *pos, *node = NULL;
int hvalue;
unsigned long flag;
+ spinlock_t *lock;
+ struct hlist_head *head;
if (avc_latest_notif_update(avc, avd->seqno, 1))
- goto out;
+ return NULL;
node = avc_alloc_node(avc);
- if (node) {
- struct hlist_head *head;
- spinlock_t *lock;
- int rc = 0;
-
- hvalue = avc_hash(ssid, tsid, tclass);
- avc_node_populate(node, ssid, tsid, tclass, avd);
- rc = avc_xperms_populate(node, xp_node);
- if (rc) {
- kmem_cache_free(avc_node_cachep, node);
- return NULL;
- }
- head = &avc->avc_cache.slots[hvalue];
- lock = &avc->avc_cache.slots_lock[hvalue];
+ if (!node)
+ return NULL;
- spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, head, list) {
- if (pos->ae.ssid == ssid &&
- pos->ae.tsid == tsid &&
- pos->ae.tclass == tclass) {
- avc_node_replace(avc, node, pos);
- goto found;
- }
+ avc_node_populate(node, ssid, tsid, tclass, avd);
+ if (avc_xperms_populate(node, xp_node)) {
+ avc_node_kill(avc, node);
+ return NULL;
+ }
+
+ hvalue = avc_hash(ssid, tsid, tclass);
+ head = &avc->avc_cache.slots[hvalue];
+ lock = &avc->avc_cache.slots_lock[hvalue];
+ spin_lock_irqsave(lock, flag);
+ hlist_for_each_entry(pos, head, list) {
+ if (pos->ae.ssid == ssid &&
+ pos->ae.tsid == tsid &&
+ pos->ae.tclass == tclass) {
+ avc_node_replace(avc, node, pos);
+ goto found;
}
- hlist_add_head_rcu(&node->list, head);
-found:
- spin_unlock_irqrestore(lock, flag);
}
-out:
+ hlist_add_head_rcu(&node->list, head);
+found:
+ spin_unlock_irqrestore(lock, flag);
return node;
}
@@ -758,8 +755,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
noinline int slow_avc_audit(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied, int result,
- struct common_audit_data *a,
- unsigned int flags)
+ struct common_audit_data *a)
{
struct common_audit_data stack_data;
struct selinux_audit_data sad;
@@ -772,17 +768,6 @@ noinline int slow_avc_audit(struct selinux_state *state,
a->type = LSM_AUDIT_DATA_NONE;
}
- /*
- * When in a RCU walk do the audit on the RCU retry. This is because
- * the collection of the dname in an inode audit message is not RCU
- * safe. Note this may drop some audits when the situation changes
- * during retry. However this is logically just as if the operation
- * happened a little later.
- */
- if ((a->type == LSM_AUDIT_DATA_INODE) &&
- (flags & MAY_NOT_BLOCK))
- return -ECHILD;
-
sad.tclass = tclass;
sad.requested = requested;
sad.ssid = ssid;
@@ -855,15 +840,14 @@ static int avc_update_node(struct selinux_avc *avc,
/*
* If we are in a non-blocking code path, e.g. VFS RCU walk,
* then we must not add permissions to a cache entry
- * because we cannot safely audit the denial. Otherwise,
+ * because we will not audit the denial. Otherwise,
* during the subsequent blocking retry (e.g. VFS ref walk), we
* will find the permissions already granted in the cache entry
* and won't audit anything at all, leading to silent denials in
* permissive mode that only appear when in enforcing mode.
*
- * See the corresponding handling in slow_avc_audit(), and the
- * logic in selinux_inode_permission for the MAY_NOT_BLOCK flag,
- * which is transliterated into AVC_NONBLOCKING.
+ * See the corresponding handling of MAY_NOT_BLOCK in avc_audit()
+ * and selinux_inode_permission().
*/
if (flags & AVC_NONBLOCKING)
return 0;
@@ -907,7 +891,7 @@ static int avc_update_node(struct selinux_avc *avc,
if (orig->ae.xp_node) {
rc = avc_xperms_populate(node, orig->ae.xp_node);
if (rc) {
- kmem_cache_free(avc_node_cachep, node);
+ avc_node_kill(avc, node);
goto out_unlock;
}
}
@@ -1205,6 +1189,25 @@ int avc_has_perm(struct selinux_state *state, u32 ssid, u32 tsid, u16 tclass,
return rc;
}
+int avc_has_perm_flags(struct selinux_state *state,
+ u32 ssid, u32 tsid, u16 tclass, u32 requested,
+ struct common_audit_data *auditdata,
+ int flags)
+{
+ struct av_decision avd;
+ int rc, rc2;
+
+ rc = avc_has_perm_noaudit(state, ssid, tsid, tclass, requested,
+ (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
+ &avd);
+
+ rc2 = avc_audit(state, ssid, tsid, tclass, requested, &avd, rc,
+ auditdata, flags);
+ if (rc2)
+ return rc2;
+ return rc;
+}
+
u32 avc_policy_seqno(struct selinux_state *state)
{
return state->avc->avc_cache.latest_notif;
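A minimal sketch of how the new avc_has_perm_flags() is meant to be called from an RCU-walk path (the caller name is invented; the pattern mirrors selinux_inode_follow_link() in the hooks.c hunks below): passing MAY_NOT_BLOCK makes avc_audit() return -ECHILD instead of auditing, so the VFS retries on the blocking ref-walk path where the denial can be logged.

#include <linux/fs.h>	/* MAY_NOT_BLOCK */
#include "avc.h"

/* Hypothetical RCU-walk caller. */
static int example_read_check(struct selinux_state *state, u32 ssid, u32 tsid,
			      struct common_audit_data *ad, bool rcu)
{
	return avc_has_perm_flags(state, ssid, tsid, SECCLASS_FILE, FILE__READ,
				  ad, rcu ? MAY_NOT_BLOCK : 0);
}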
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 74dd46de01b6..d9e8b2131a65 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -89,6 +89,8 @@
#include <linux/kernfs.h>
#include <linux/stringhash.h> /* for hashlen_string() */
#include <uapi/linux/mount.h>
+#include <linux/fsnotify.h>
+#include <linux/fanotify.h>
#include "avc.h"
#include "objsec.h"
@@ -107,7 +109,7 @@ struct selinux_state selinux_state;
static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
-static int selinux_enforcing_boot;
+static int selinux_enforcing_boot __initdata;
static int __init enforcing_setup(char *str)
{
@@ -121,13 +123,13 @@ __setup("enforcing=", enforcing_setup);
#define selinux_enforcing_boot 1
#endif
-int selinux_enabled __lsm_ro_after_init = 1;
+int selinux_enabled_boot __initdata = 1;
#ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
static int __init selinux_enabled_setup(char *str)
{
unsigned long enabled;
if (!kstrtoul(str, 0, &enabled))
- selinux_enabled = enabled ? 1 : 0;
+ selinux_enabled_boot = enabled ? 1 : 0;
return 1;
}
__setup("selinux=", selinux_enabled_setup);
@@ -236,24 +238,6 @@ static inline u32 task_sid(const struct task_struct *task)
return sid;
}
-/* Allocate and free functions for each kind of security blob. */
-
-static int inode_alloc_security(struct inode *inode)
-{
- struct inode_security_struct *isec = selinux_inode(inode);
- u32 sid = current_sid();
-
- spin_lock_init(&isec->lock);
- INIT_LIST_HEAD(&isec->list);
- isec->inode = inode;
- isec->sid = SECINITSID_UNLABELED;
- isec->sclass = SECCLASS_FILE;
- isec->task_sid = sid;
- isec->initialized = LABEL_INVALID;
-
- return 0;
-}
-
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
/*
@@ -270,7 +254,7 @@ static int __inode_security_revalidate(struct inode *inode,
might_sleep_if(may_sleep);
- if (selinux_state.initialized &&
+ if (selinux_initialized(&selinux_state) &&
isec->initialized != LABEL_INITIALIZED) {
if (!may_sleep)
return -ECHILD;
@@ -352,37 +336,6 @@ static void inode_free_security(struct inode *inode)
}
}
-static int file_alloc_security(struct file *file)
-{
- struct file_security_struct *fsec = selinux_file(file);
- u32 sid = current_sid();
-
- fsec->sid = sid;
- fsec->fown_sid = sid;
-
- return 0;
-}
-
-static int superblock_alloc_security(struct super_block *sb)
-{
- struct superblock_security_struct *sbsec;
-
- sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
- if (!sbsec)
- return -ENOMEM;
-
- mutex_init(&sbsec->lock);
- INIT_LIST_HEAD(&sbsec->isec_head);
- spin_lock_init(&sbsec->isec_lock);
- sbsec->sb = sb;
- sbsec->sid = SECINITSID_UNLABELED;
- sbsec->def_sid = SECINITSID_FILE;
- sbsec->mntpoint_sid = SECINITSID_UNLABELED;
- sb->s_security = sbsec;
-
- return 0;
-}
-
static void superblock_free_security(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -404,11 +357,6 @@ static void selinux_free_mnt_opts(void *mnt_opts)
kfree(opts);
}
-static inline int inode_doinit(struct inode *inode)
-{
- return inode_doinit_with_dentry(inode, NULL);
-}
-
enum {
Opt_error = -1,
Opt_context = 0,
@@ -596,7 +544,7 @@ static int sb_finish_set_opts(struct super_block *sb)
inode = igrab(inode);
if (inode) {
if (!IS_PRIVATE(inode))
- inode_doinit(inode);
+ inode_doinit_with_dentry(inode, NULL);
iput(inode);
}
spin_lock(&sbsec->isec_lock);
@@ -657,7 +605,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
mutex_lock(&sbsec->lock);
- if (!selinux_state.initialized) {
+ if (!selinux_initialized(&selinux_state)) {
if (!opts) {
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
@@ -750,6 +698,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "tracefs") ||
+ !strcmp(sb->s_type->name, "binderfs") ||
!strcmp(sb->s_type->name, "pstore"))
sbsec->flags |= SE_SBGENFS;
@@ -926,7 +875,7 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
* if the parent was able to be mounted it clearly had no special lsm
* mount options. thus we can safely deal with this superblock later
*/
- if (!selinux_state.initialized)
+ if (!selinux_initialized(&selinux_state))
return 0;
/*
@@ -1101,7 +1050,7 @@ static int selinux_sb_show_options(struct seq_file *m, struct super_block *sb)
if (!(sbsec->flags & SE_SBINITIALIZED))
return 0;
- if (!selinux_state.initialized)
+ if (!selinux_initialized(&selinux_state))
return 0;
if (sbsec->flags & FSCONTEXT_MNT) {
@@ -1831,8 +1780,8 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- rc = selinux_determine_inode_label(selinux_cred(current_cred()), dir,
- &dentry->d_name, tclass, &newsid);
+ rc = selinux_determine_inode_label(tsec, dir, &dentry->d_name, tclass,
+ &newsid);
if (rc)
return rc;
@@ -2547,9 +2496,8 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
{
const struct task_security_struct *tsec = selinux_cred(current_cred());
- struct itimerval itimer;
u32 osid, sid;
- int rc, i;
+ int rc;
osid = tsec->osid;
sid = tsec->sid;
@@ -2567,11 +2515,8 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
rc = avc_has_perm(&selinux_state,
osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
if (rc) {
- if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
- memset(&itimer, 0, sizeof itimer);
- for (i = 0; i < 3; i++)
- do_setitimer(i, &itimer, NULL);
- }
+ clear_itimer();
+
spin_lock_irq(&current->sighand->siglock);
if (!fatal_signal_pending(current)) {
flush_sigqueue(&current->pending);
@@ -2594,7 +2539,22 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
static int selinux_sb_alloc_security(struct super_block *sb)
{
- return superblock_alloc_security(sb);
+ struct superblock_security_struct *sbsec;
+
+ sbsec = kzalloc(sizeof(struct superblock_security_struct), GFP_KERNEL);
+ if (!sbsec)
+ return -ENOMEM;
+
+ mutex_init(&sbsec->lock);
+ INIT_LIST_HEAD(&sbsec->isec_head);
+ spin_lock_init(&sbsec->isec_lock);
+ sbsec->sb = sb;
+ sbsec->sid = SECINITSID_UNLABELED;
+ sbsec->def_sid = SECINITSID_FILE;
+ sbsec->mntpoint_sid = SECINITSID_UNLABELED;
+ sb->s_security = sbsec;
+
+ return 0;
}
static void selinux_sb_free_security(struct super_block *sb)
@@ -2764,6 +2724,14 @@ static int selinux_mount(const char *dev_name,
return path_has_perm(cred, path, FILE__MOUNTON);
}
+static int selinux_move_mount(const struct path *from_path,
+ const struct path *to_path)
+{
+ const struct cred *cred = current_cred();
+
+ return path_has_perm(cred, to_path, FILE__MOUNTON);
+}
+
static int selinux_umount(struct vfsmount *mnt, int flags)
{
const struct cred *cred = current_cred();
@@ -2846,7 +2814,18 @@ static int selinux_fs_context_parse_param(struct fs_context *fc,
static int selinux_inode_alloc_security(struct inode *inode)
{
- return inode_alloc_security(inode);
+ struct inode_security_struct *isec = selinux_inode(inode);
+ u32 sid = current_sid();
+
+ spin_lock_init(&isec->lock);
+ INIT_LIST_HEAD(&isec->list);
+ isec->inode = inode;
+ isec->sid = SECINITSID_UNLABELED;
+ isec->sclass = SECCLASS_FILE;
+ isec->task_sid = sid;
+ isec->initialized = LABEL_INVALID;
+
+ return 0;
}
static void selinux_inode_free_security(struct inode *inode)
@@ -2908,8 +2887,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
newsid = tsec->create_sid;
- rc = selinux_determine_inode_label(selinux_cred(current_cred()),
- dir, qstr,
+ rc = selinux_determine_inode_label(tsec, dir, qstr,
inode_mode_to_security_class(inode->i_mode),
&newsid);
if (rc)
@@ -2923,7 +2901,8 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
isec->initialized = LABEL_INITIALIZED;
}
- if (!selinux_state.initialized || !(sbsec->flags & SBLABEL_MNT))
+ if (!selinux_initialized(&selinux_state) ||
+ !(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (name)
@@ -3006,14 +2985,14 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
if (IS_ERR(isec))
return PTR_ERR(isec);
- return avc_has_perm(&selinux_state,
- sid, isec->sid, isec->sclass, FILE__READ, &ad);
+ return avc_has_perm_flags(&selinux_state,
+ sid, isec->sid, isec->sclass, FILE__READ, &ad,
+ rcu ? MAY_NOT_BLOCK : 0);
}
static noinline int audit_inode_permission(struct inode *inode,
u32 perms, u32 audited, u32 denied,
- int result,
- unsigned flags)
+ int result)
{
struct common_audit_data ad;
struct inode_security_struct *isec = selinux_inode(inode);
@@ -3024,7 +3003,7 @@ static noinline int audit_inode_permission(struct inode *inode,
rc = slow_avc_audit(&selinux_state,
current_sid(), isec->sid, isec->sclass, perms,
- audited, denied, result, &ad, flags);
+ audited, denied, result, &ad);
if (rc)
return rc;
return 0;
@@ -3035,7 +3014,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
const struct cred *cred = current_cred();
u32 perms;
bool from_access;
- unsigned flags = mask & MAY_NOT_BLOCK;
+ bool no_block = mask & MAY_NOT_BLOCK;
struct inode_security_struct *isec;
u32 sid;
struct av_decision avd;
@@ -3057,13 +3036,13 @@ static int selinux_inode_permission(struct inode *inode, int mask)
perms = file_mask_to_av(inode->i_mode, mask);
sid = cred_sid(cred);
- isec = inode_security_rcu(inode, flags & MAY_NOT_BLOCK);
+ isec = inode_security_rcu(inode, no_block);
if (IS_ERR(isec))
return PTR_ERR(isec);
rc = avc_has_perm_noaudit(&selinux_state,
sid, isec->sid, isec->sclass, perms,
- (flags & MAY_NOT_BLOCK) ? AVC_NONBLOCKING : 0,
+ no_block ? AVC_NONBLOCKING : 0,
&avd);
audited = avc_audit_required(perms, &avd, rc,
from_access ? FILE__AUDIT_ACCESS : 0,
@@ -3071,7 +3050,11 @@ static int selinux_inode_permission(struct inode *inode, int mask)
if (likely(!audited))
return rc;
- rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
+ /* fall back to ref-walk if we have to generate audit */
+ if (no_block)
+ return -ECHILD;
+
+ rc2 = audit_inode_permission(inode, perms, audited, denied, rc);
if (rc2)
return rc2;
return rc;
@@ -3142,6 +3125,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
}
+ if (!selinux_initialized(&selinux_state))
+ return (inode_owner_or_capable(inode) ? 0 : -EPERM);
+
sbsec = inode->i_sb->s_security;
if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
@@ -3225,6 +3211,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
+ if (!selinux_initialized(&selinux_state)) {
+ /* If we haven't even been initialized, then we can't validate
+ * against a policy, so leave the label as invalid. It may
+ * resolve to a valid label on the next revalidation try if
+ * we've since initialized.
+ */
+ return;
+ }
+
rc = security_context_to_sid_force(&selinux_state, value, size,
&newsid);
if (rc) {
@@ -3275,6 +3270,50 @@ static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
return -EACCES;
}
+static int selinux_path_notify(const struct path *path, u64 mask,
+ unsigned int obj_type)
+{
+ int ret;
+ u32 perm;
+
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = *path;
+
+ /*
+ * Set permission needed based on the type of mark being set.
+ * Performs an additional check for sb watches.
+ */
+ switch (obj_type) {
+ case FSNOTIFY_OBJ_TYPE_VFSMOUNT:
+ perm = FILE__WATCH_MOUNT;
+ break;
+ case FSNOTIFY_OBJ_TYPE_SB:
+ perm = FILE__WATCH_SB;
+ ret = superblock_has_perm(current_cred(), path->dentry->d_sb,
+ FILESYSTEM__WATCH, &ad);
+ if (ret)
+ return ret;
+ break;
+ case FSNOTIFY_OBJ_TYPE_INODE:
+ perm = FILE__WATCH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* blocking watches require the file:watch_with_perm permission */
+ if (mask & (ALL_FSNOTIFY_PERM_EVENTS))
+ perm |= FILE__WATCH_WITH_PERM;
+
+ /* watches on read-like events need the file:watch_reads permission */
+ if (mask & (FS_ACCESS | FS_ACCESS_PERM | FS_CLOSE_NOWRITE))
+ perm |= FILE__WATCH_READS;
+
+ return path_has_perm(current_cred(), path, perm);
+}
+
/*
* Copy the inode security context value to the user.
*
@@ -3403,7 +3442,7 @@ static int selinux_inode_copy_up_xattr(const char *name)
static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
- const struct task_security_struct *tsec = current_security();
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
u32 parent_sid, newsid, clen;
int rc;
char *context;
@@ -3496,7 +3535,13 @@ static int selinux_file_permission(struct file *file, int mask)
static int selinux_file_alloc_security(struct file *file)
{
- return file_alloc_security(file);
+ struct file_security_struct *fsec = selinux_file(file);
+ u32 sid = current_sid();
+
+ fsec->sid = sid;
+ fsec->fown_sid = sid;
+
+ return 0;
}
/*
@@ -3589,7 +3634,7 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
return error;
}
-static int default_noexec;
+static int default_noexec __ro_after_init;
static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
{
@@ -4577,8 +4622,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
inet_get_local_port_range(sock_net(sk), &low, &high);
- if (snum < max(inet_prot_sock(sock_net(sk)), low) ||
- snum > high) {
+ if (inet_port_requires_bind_service(sock_net(sk), snum) ||
+ snum < low || snum > high) {
err = sel_netport_sid(sk->sk_protocol,
snum, &sid);
if (err)
@@ -5461,44 +5506,6 @@ static int selinux_tun_dev_open(void *security)
return 0;
}
-static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
-{
- int err = 0;
- u32 perm;
- struct nlmsghdr *nlh;
- struct sk_security_struct *sksec = sk->sk_security;
-
- if (skb->len < NLMSG_HDRLEN) {
- err = -EINVAL;
- goto out;
- }
- nlh = nlmsg_hdr(skb);
-
- err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
- if (err) {
- if (err == -EINVAL) {
- pr_warn_ratelimited("SELinux: unrecognized netlink"
- " message: protocol=%hu nlmsg_type=%hu sclass=%s"
- " pig=%d comm=%s\n",
- sk->sk_protocol, nlh->nlmsg_type,
- secclass_map[sksec->sclass - 1].name,
- task_pid_nr(current), current->comm);
- if (!enforcing_enabled(&selinux_state) ||
- security_get_allow_unknown(&selinux_state))
- err = 0;
- }
-
- /* Ignore */
- if (err == -ENOENT)
- err = 0;
- goto out;
- }
-
- err = sock_has_perm(sk, perm);
-out:
- return err;
-}
-
#ifdef CONFIG_NETFILTER
static unsigned int selinux_ip_forward(struct sk_buff *skb,
@@ -5827,7 +5834,40 @@ static unsigned int selinux_ipv6_postroute(void *priv,
static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
{
- return selinux_nlmsg_perm(sk, skb);
+ int err = 0;
+ u32 perm;
+ struct nlmsghdr *nlh;
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ if (skb->len < NLMSG_HDRLEN) {
+ err = -EINVAL;
+ goto out;
+ }
+ nlh = nlmsg_hdr(skb);
+
+ err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
+ if (err) {
+ if (err == -EINVAL) {
+ pr_warn_ratelimited("SELinux: unrecognized netlink"
+ " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+ " pid=%d comm=%s\n",
+ sk->sk_protocol, nlh->nlmsg_type,
+ secclass_map[sksec->sclass - 1].name,
+ task_pid_nr(current), current->comm);
+ if (!enforcing_enabled(&selinux_state) ||
+ security_get_allow_unknown(&selinux_state))
+ err = 0;
+ }
+
+ /* Ignore */
+ if (err == -ENOENT)
+ err = 0;
+ goto out;
+ }
+
+ err = sock_has_perm(sk, perm);
+out:
+ return err;
}
static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
@@ -5836,16 +5876,6 @@ static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
isec->sid = current_sid();
}
-static int msg_msg_alloc_security(struct msg_msg *msg)
-{
- struct msg_security_struct *msec;
-
- msec = selinux_msg_msg(msg);
- msec->sid = SECINITSID_UNLABELED;
-
- return 0;
-}
-
static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
u32 perms)
{
@@ -5864,7 +5894,12 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
static int selinux_msg_msg_alloc_security(struct msg_msg *msg)
{
- return msg_msg_alloc_security(msg);
+ struct msg_security_struct *msec;
+
+ msec = selinux_msg_msg(msg);
+ msec->sid = SECINITSID_UNLABELED;
+
+ return 0;
}
/* message queue security operations */
@@ -6741,6 +6776,34 @@ static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
}
#endif
+static int selinux_lockdown(enum lockdown_reason what)
+{
+ struct common_audit_data ad;
+ u32 sid = current_sid();
+ int invalid_reason = (what <= LOCKDOWN_NONE) ||
+ (what == LOCKDOWN_INTEGRITY_MAX) ||
+ (what >= LOCKDOWN_CONFIDENTIALITY_MAX);
+
+ if (WARN(invalid_reason, "Invalid lockdown reason")) {
+ audit_log(audit_context(),
+ GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "lockdown_reason=invalid");
+ return -EINVAL;
+ }
+
+ ad.type = LSM_AUDIT_DATA_LOCKDOWN;
+ ad.u.reason = what;
+
+ if (what <= LOCKDOWN_INTEGRITY_MAX)
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_LOCKDOWN,
+ LOCKDOWN__INTEGRITY, &ad);
+ else
+ return avc_has_perm(&selinux_state,
+ sid, sid, SECCLASS_LOCKDOWN,
+ LOCKDOWN__CONFIDENTIALITY, &ad);
+}
+
struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
.lbs_cred = sizeof(struct task_security_struct),
.lbs_file = sizeof(struct file_security_struct),
@@ -6749,6 +6812,82 @@ struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
.lbs_msg_msg = sizeof(struct msg_security_struct),
};
+#ifdef CONFIG_PERF_EVENTS
+static int selinux_perf_event_open(struct perf_event_attr *attr, int type)
+{
+ u32 requested, sid = current_sid();
+
+ if (type == PERF_SECURITY_OPEN)
+ requested = PERF_EVENT__OPEN;
+ else if (type == PERF_SECURITY_CPU)
+ requested = PERF_EVENT__CPU;
+ else if (type == PERF_SECURITY_KERNEL)
+ requested = PERF_EVENT__KERNEL;
+ else if (type == PERF_SECURITY_TRACEPOINT)
+ requested = PERF_EVENT__TRACEPOINT;
+ else
+ return -EINVAL;
+
+ return avc_has_perm(&selinux_state, sid, sid, SECCLASS_PERF_EVENT,
+ requested, NULL);
+}
+
+static int selinux_perf_event_alloc(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec;
+
+ perfsec = kzalloc(sizeof(*perfsec), GFP_KERNEL);
+ if (!perfsec)
+ return -ENOMEM;
+
+ perfsec->sid = current_sid();
+ event->security = perfsec;
+
+ return 0;
+}
+
+static void selinux_perf_event_free(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+
+ event->security = NULL;
+ kfree(perfsec);
+}
+
+static int selinux_perf_event_read(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
+ SECCLASS_PERF_EVENT, PERF_EVENT__READ, NULL);
+}
+
+static int selinux_perf_event_write(struct perf_event *event)
+{
+ struct perf_event_security_struct *perfsec = event->security;
+ u32 sid = current_sid();
+
+ return avc_has_perm(&selinux_state, sid, perfsec->sid,
+ SECCLASS_PERF_EVENT, PERF_EVENT__WRITE, NULL);
+}
+#endif
+
+/*
+ * IMPORTANT NOTE: When adding new hooks, please be careful to keep this order:
+ * 1. any hooks that don't belong to (2.) or (3.) below,
+ * 2. hooks that both access structures allocated by other hooks, and allocate
+ * structures that can be later accessed by other hooks (mostly "cloning"
+ * hooks),
+ * 3. hooks that only allocate structures that can be later accessed by other
+ * hooks ("allocating" hooks).
+ *
+ * Please follow block comment delimiters in the list to keep this order.
+ *
+ * This ordering is needed for SELinux runtime disable to work at least somewhat
+ * safely. Breaking the ordering rules above might lead to NULL pointer derefs
+ * when disabling SELinux at runtime.
+ */
static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
@@ -6771,12 +6910,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bprm_committing_creds, selinux_bprm_committing_creds),
LSM_HOOK_INIT(bprm_committed_creds, selinux_bprm_committed_creds),
- LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
- LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
-
- LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
LSM_HOOK_INIT(sb_free_security, selinux_sb_free_security),
- LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
LSM_HOOK_INIT(sb_free_mnt_opts, selinux_free_mnt_opts),
LSM_HOOK_INIT(sb_remount, selinux_sb_remount),
LSM_HOOK_INIT(sb_kern_mount, selinux_sb_kern_mount),
@@ -6786,12 +6920,12 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(sb_umount, selinux_umount),
LSM_HOOK_INIT(sb_set_mnt_opts, selinux_set_mnt_opts),
LSM_HOOK_INIT(sb_clone_mnt_opts, selinux_sb_clone_mnt_opts),
- LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
+
+ LSM_HOOK_INIT(move_mount, selinux_move_mount),
LSM_HOOK_INIT(dentry_init_security, selinux_dentry_init_security),
LSM_HOOK_INIT(dentry_create_files_as, selinux_dentry_create_files_as),
- LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
LSM_HOOK_INIT(inode_free_security, selinux_inode_free_security),
LSM_HOOK_INIT(inode_init_security, selinux_inode_init_security),
LSM_HOOK_INIT(inode_create, selinux_inode_create),
@@ -6818,6 +6952,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(inode_getsecid, selinux_inode_getsecid),
LSM_HOOK_INIT(inode_copy_up, selinux_inode_copy_up),
LSM_HOOK_INIT(inode_copy_up_xattr, selinux_inode_copy_up_xattr),
+ LSM_HOOK_INIT(path_notify, selinux_path_notify),
LSM_HOOK_INIT(kernfs_init_security, selinux_kernfs_init_security),
@@ -6862,21 +6997,15 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ipc_permission, selinux_ipc_permission),
LSM_HOOK_INIT(ipc_getsecid, selinux_ipc_getsecid),
- LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
-
- LSM_HOOK_INIT(msg_queue_alloc_security,
- selinux_msg_queue_alloc_security),
LSM_HOOK_INIT(msg_queue_associate, selinux_msg_queue_associate),
LSM_HOOK_INIT(msg_queue_msgctl, selinux_msg_queue_msgctl),
LSM_HOOK_INIT(msg_queue_msgsnd, selinux_msg_queue_msgsnd),
LSM_HOOK_INIT(msg_queue_msgrcv, selinux_msg_queue_msgrcv),
- LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
LSM_HOOK_INIT(shm_associate, selinux_shm_associate),
LSM_HOOK_INIT(shm_shmctl, selinux_shm_shmctl),
LSM_HOOK_INIT(shm_shmat, selinux_shm_shmat),
- LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
LSM_HOOK_INIT(sem_associate, selinux_sem_associate),
LSM_HOOK_INIT(sem_semctl, selinux_sem_semctl),
LSM_HOOK_INIT(sem_semop, selinux_sem_semop),
@@ -6887,13 +7016,11 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(setprocattr, selinux_setprocattr),
LSM_HOOK_INIT(ismaclabel, selinux_ismaclabel),
- LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
LSM_HOOK_INIT(secctx_to_secid, selinux_secctx_to_secid),
LSM_HOOK_INIT(release_secctx, selinux_release_secctx),
LSM_HOOK_INIT(inode_invalidate_secctx, selinux_inode_invalidate_secctx),
LSM_HOOK_INIT(inode_notifysecctx, selinux_inode_notifysecctx),
LSM_HOOK_INIT(inode_setsecctx, selinux_inode_setsecctx),
- LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),
LSM_HOOK_INIT(unix_stream_connect, selinux_socket_unix_stream_connect),
LSM_HOOK_INIT(unix_may_send, selinux_socket_unix_may_send),
@@ -6916,7 +7043,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(socket_getpeersec_stream,
selinux_socket_getpeersec_stream),
LSM_HOOK_INIT(socket_getpeersec_dgram, selinux_socket_getpeersec_dgram),
- LSM_HOOK_INIT(sk_alloc_security, selinux_sk_alloc_security),
LSM_HOOK_INIT(sk_free_security, selinux_sk_free_security),
LSM_HOOK_INIT(sk_clone_security, selinux_sk_clone_security),
LSM_HOOK_INIT(sk_getsecid, selinux_sk_getsecid),
@@ -6931,7 +7057,6 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(secmark_refcount_inc, selinux_secmark_refcount_inc),
LSM_HOOK_INIT(secmark_refcount_dec, selinux_secmark_refcount_dec),
LSM_HOOK_INIT(req_classify_flow, selinux_req_classify_flow),
- LSM_HOOK_INIT(tun_dev_alloc_security, selinux_tun_dev_alloc_security),
LSM_HOOK_INIT(tun_dev_free_security, selinux_tun_dev_free_security),
LSM_HOOK_INIT(tun_dev_create, selinux_tun_dev_create),
LSM_HOOK_INIT(tun_dev_attach_queue, selinux_tun_dev_attach_queue),
@@ -6941,17 +7066,11 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ib_pkey_access, selinux_ib_pkey_access),
LSM_HOOK_INIT(ib_endport_manage_subnet,
selinux_ib_endport_manage_subnet),
- LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security),
LSM_HOOK_INIT(ib_free_security, selinux_ib_free_security),
#endif
#ifdef CONFIG_SECURITY_NETWORK_XFRM
- LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
- LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
LSM_HOOK_INIT(xfrm_policy_free_security, selinux_xfrm_policy_free),
LSM_HOOK_INIT(xfrm_policy_delete_security, selinux_xfrm_policy_delete),
- LSM_HOOK_INIT(xfrm_state_alloc, selinux_xfrm_state_alloc),
- LSM_HOOK_INIT(xfrm_state_alloc_acquire,
- selinux_xfrm_state_alloc_acquire),
LSM_HOOK_INIT(xfrm_state_free_security, selinux_xfrm_state_free),
LSM_HOOK_INIT(xfrm_state_delete_security, selinux_xfrm_state_delete),
LSM_HOOK_INIT(xfrm_policy_lookup, selinux_xfrm_policy_lookup),
@@ -6961,14 +7080,12 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
#endif
#ifdef CONFIG_KEYS
- LSM_HOOK_INIT(key_alloc, selinux_key_alloc),
LSM_HOOK_INIT(key_free, selinux_key_free),
LSM_HOOK_INIT(key_permission, selinux_key_permission),
LSM_HOOK_INIT(key_getsecurity, selinux_key_getsecurity),
#endif
#ifdef CONFIG_AUDIT
- LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
LSM_HOOK_INIT(audit_rule_known, selinux_audit_rule_known),
LSM_HOOK_INIT(audit_rule_match, selinux_audit_rule_match),
LSM_HOOK_INIT(audit_rule_free, selinux_audit_rule_free),
@@ -6978,11 +7095,66 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(bpf, selinux_bpf),
LSM_HOOK_INIT(bpf_map, selinux_bpf_map),
LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog),
- LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
- LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
#endif
+
+#ifdef CONFIG_PERF_EVENTS
+ LSM_HOOK_INIT(perf_event_open, selinux_perf_event_open),
+ LSM_HOOK_INIT(perf_event_free, selinux_perf_event_free),
+ LSM_HOOK_INIT(perf_event_read, selinux_perf_event_read),
+ LSM_HOOK_INIT(perf_event_write, selinux_perf_event_write),
+#endif
+
+ LSM_HOOK_INIT(locked_down, selinux_lockdown),
+
+ /*
+ * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
+ */
+ LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
+ LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
+ LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
+ LSM_HOOK_INIT(sb_add_mnt_opt, selinux_add_mnt_opt),
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone),
+#endif
+
+ /*
+ * PUT "ALLOCATING" HOOKS HERE
+ */
+ LSM_HOOK_INIT(msg_msg_alloc_security, selinux_msg_msg_alloc_security),
+ LSM_HOOK_INIT(msg_queue_alloc_security,
+ selinux_msg_queue_alloc_security),
+ LSM_HOOK_INIT(shm_alloc_security, selinux_shm_alloc_security),
+ LSM_HOOK_INIT(sb_alloc_security, selinux_sb_alloc_security),
+ LSM_HOOK_INIT(inode_alloc_security, selinux_inode_alloc_security),
+ LSM_HOOK_INIT(sem_alloc_security, selinux_sem_alloc_security),
+ LSM_HOOK_INIT(secid_to_secctx, selinux_secid_to_secctx),
+ LSM_HOOK_INIT(inode_getsecctx, selinux_inode_getsecctx),
+ LSM_HOOK_INIT(sk_alloc_security, selinux_sk_alloc_security),
+ LSM_HOOK_INIT(tun_dev_alloc_security, selinux_tun_dev_alloc_security),
+#ifdef CONFIG_SECURITY_INFINIBAND
+ LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security),
+#endif
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc),
+ LSM_HOOK_INIT(xfrm_state_alloc, selinux_xfrm_state_alloc),
+ LSM_HOOK_INIT(xfrm_state_alloc_acquire,
+ selinux_xfrm_state_alloc_acquire),
+#endif
+#ifdef CONFIG_KEYS
+ LSM_HOOK_INIT(key_alloc, selinux_key_alloc),
+#endif
+#ifdef CONFIG_AUDIT
+ LSM_HOOK_INIT(audit_rule_init, selinux_audit_rule_init),
+#endif
+#ifdef CONFIG_BPF_SYSCALL
+ LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc),
+ LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc),
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
+#endif
};
static __init int selinux_init(void)
@@ -7045,7 +7217,7 @@ void selinux_complete_init(void)
DEFINE_LSM(selinux) = {
.name = "selinux",
.flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE,
- .enabled = &selinux_enabled,
+ .enabled = &selinux_enabled_boot,
.blobs = &selinux_blob_sizes,
.init = selinux_init,
};
@@ -7114,7 +7286,7 @@ static int __init selinux_nf_ip_init(void)
{
int err;
- if (!selinux_enabled)
+ if (!selinux_enabled_boot)
return 0;
pr_debug("SELinux: Registering netfilter hooks\n");
@@ -7147,30 +7319,32 @@ static void selinux_nf_ip_exit(void)
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
int selinux_disable(struct selinux_state *state)
{
- if (state->initialized) {
+ if (selinux_initialized(state)) {
/* Not permitted after initial policy load. */
return -EINVAL;
}
- if (state->disabled) {
+ if (selinux_disabled(state)) {
/* Only do this once. */
return -EINVAL;
}
- state->disabled = 1;
+ selinux_mark_disabled(state);
pr_info("SELinux: Disabled at runtime.\n");
- selinux_enabled = 0;
+ /*
+ * Unregister netfilter hooks.
+ * Must be done before security_delete_hooks() to avoid breaking
+ * runtime disable.
+ */
+ selinux_nf_ip_exit();
security_delete_hooks(selinux_hooks, ARRAY_SIZE(selinux_hooks));
/* Try to destroy the avc node cache */
avc_disable();
- /* Unregister netfilter hooks. */
- selinux_nf_ip_exit();
-
/* Unregister selinuxfs. */
exit_sel_fs();
diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
index de92365e4324..f68a7617cfb9 100644
--- a/security/selinux/ibpkey.c
+++ b/security/selinux/ibpkey.c
@@ -222,7 +222,7 @@ static __init int sel_ib_pkey_init(void)
{
int iter;
- if (!selinux_enabled)
+ if (!selinux_enabled_boot)
return 0;
for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) {
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 7be0e1e90e8b..cf4cc3ef959b 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -100,8 +100,7 @@ static inline u32 avc_audit_required(u32 requested,
int slow_avc_audit(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass,
u32 requested, u32 audited, u32 denied, int result,
- struct common_audit_data *a,
- unsigned flags);
+ struct common_audit_data *a);
/**
* avc_audit - Audit the granting or denial of permissions.
@@ -135,9 +134,12 @@ static inline int avc_audit(struct selinux_state *state,
audited = avc_audit_required(requested, avd, result, 0, &denied);
if (likely(!audited))
return 0;
+ /* fall back to ref-walk if we have to generate audit */
+ if (flags & MAY_NOT_BLOCK)
+ return -ECHILD;
return slow_avc_audit(state, ssid, tsid, tclass,
requested, audited, denied, result,
- a, flags);
+ a);
}
#define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -153,6 +155,11 @@ int avc_has_perm(struct selinux_state *state,
u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct common_audit_data *auditdata);
+int avc_has_perm_flags(struct selinux_state *state,
+ u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct common_audit_data *auditdata,
+ int flags);
int avc_has_extended_perms(struct selinux_state *state,
u32 ssid, u32 tsid, u16 tclass, u32 requested,
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 201f7e588a29..986f3ac14282 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -7,7 +7,8 @@
#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
"rename", "execute", "quotaon", "mounton", "audit_access", \
- "open", "execmod"
+ "open", "execmod", "watch", "watch_mount", "watch_sb", \
+ "watch_with_perm", "watch_reads"
#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
"listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
@@ -60,7 +61,7 @@ struct security_class_mapping secclass_map[] = {
{ "filesystem",
{ "mount", "remount", "unmount", "getattr",
"relabelfrom", "relabelto", "associate", "quotamod",
- "quotaget", NULL } },
+ "quotaget", "watch", NULL } },
{ "file",
{ COMMON_FILE_PERMS,
"execute_no_trans", "entrypoint", NULL } },
@@ -243,6 +244,10 @@ struct security_class_mapping secclass_map[] = {
{"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
{ "xdp_socket",
{ COMMON_SOCK_PERMS, NULL } },
+ { "perf_event",
+ {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
+ { "lockdown",
+ { "integrity", "confidentiality", NULL } },
{ NULL }
};
diff --git a/security/selinux/include/ibpkey.h b/security/selinux/include/ibpkey.h
index a2ebe397bcb7..e6ac1d23320b 100644
--- a/security/selinux/include/ibpkey.h
+++ b/security/selinux/include/ibpkey.h
@@ -14,8 +14,19 @@
#ifndef _SELINUX_IB_PKEY_H
#define _SELINUX_IB_PKEY_H
+#ifdef CONFIG_SECURITY_INFINIBAND
void sel_ib_pkey_flush(void);
-
int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid);
+#else
+static inline void sel_ib_pkey_flush(void)
+{
+ return;
+}
+static inline int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid)
+{
+ *sid = SECINITSID_UNLABELED;
+ return 0;
+}
+#endif
#endif
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 91c5395dd20c..330b7b6d44e0 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -35,17 +35,7 @@ struct task_security_struct {
u32 create_sid; /* fscreate SID */
u32 keycreate_sid; /* keycreate SID */
u32 sockcreate_sid; /* fscreate SID */
-};
-
-/*
- * get the subjective security ID of the current task
- */
-static inline u32 current_sid(void)
-{
- const struct task_security_struct *tsec = current_security();
-
- return tsec->sid;
-}
+} __randomize_layout;
enum label_initialized {
LABEL_INVALID, /* invalid or not initialized */
@@ -151,7 +141,11 @@ struct pkey_security_struct {
};
struct bpf_security_struct {
- u32 sid; /*SID of bpf obj creater*/
+ u32 sid; /* SID of bpf obj creator */
+};
+
+struct perf_event_security_struct {
+ u32 sid; /* SID of perf_event obj creator */
};
extern struct lsm_blob_sizes selinux_blob_sizes;
@@ -185,4 +179,14 @@ static inline struct ipc_security_struct *selinux_ipc(
return ipc->security + selinux_blob_sizes.lbs_ipc;
}
+/*
+ * get the subjective security ID of the current task
+ */
+static inline u32 current_sid(void)
+{
+ const struct task_security_struct *tsec = selinux_cred(current_cred());
+
+ return tsec->sid;
+}
+
#endif /* _SELINUX_OBJSEC_H_ */
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 111121281c47..a39f9565d80b 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -40,10 +40,11 @@
#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
#define POLICYDB_VERSION_XPERMS_IOCTL 30
#define POLICYDB_VERSION_INFINIBAND 31
+#define POLICYDB_VERSION_GLBLUB 32
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_INFINIBAND
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_GLBLUB
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
@@ -68,7 +69,7 @@
struct netlbl_lsm_secattr;
-extern int selinux_enabled;
+extern int selinux_enabled_boot;
/* Policy capabilities */
enum {
@@ -98,7 +99,9 @@ struct selinux_avc;
struct selinux_ss;
struct selinux_state {
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
bool disabled;
+#endif
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
bool enforcing;
#endif
@@ -107,22 +110,34 @@ struct selinux_state {
bool policycap[__POLICYDB_CAPABILITY_MAX];
struct selinux_avc *avc;
struct selinux_ss *ss;
-};
+} __randomize_layout;
void selinux_ss_init(struct selinux_ss **ss);
void selinux_avc_init(struct selinux_avc **avc);
extern struct selinux_state selinux_state;
+static inline bool selinux_initialized(const struct selinux_state *state)
+{
+ /* do a synchronized load to avoid race conditions */
+ return smp_load_acquire(&state->initialized);
+}
+
+static inline void selinux_mark_initialized(struct selinux_state *state)
+{
+ /* do a synchronized write to avoid race conditions */
+ smp_store_release(&state->initialized, true);
+}
+
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
static inline bool enforcing_enabled(struct selinux_state *state)
{
- return state->enforcing;
+ return READ_ONCE(state->enforcing);
}
static inline void enforcing_set(struct selinux_state *state, bool value)
{
- state->enforcing = value;
+ WRITE_ONCE(state->enforcing, value);
}
#else
static inline bool enforcing_enabled(struct selinux_state *state)
@@ -135,6 +150,23 @@ static inline void enforcing_set(struct selinux_state *state, bool value)
}
#endif
+#ifdef CONFIG_SECURITY_SELINUX_DISABLE
+static inline bool selinux_disabled(struct selinux_state *state)
+{
+ return READ_ONCE(state->disabled);
+}
+
+static inline void selinux_mark_disabled(struct selinux_state *state)
+{
+ WRITE_ONCE(state->disabled, true);
+}
+#else
+static inline bool selinux_disabled(struct selinux_state *state)
+{
+ return false;
+}
+#endif
+
static inline bool selinux_policycap_netpeer(void)
{
struct selinux_state *state = &selinux_state;
@@ -394,5 +426,6 @@ extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
extern void avtab_cache_init(void);
extern void ebitmap_cache_init(void);
extern void hashtab_cache_init(void);
+extern int security_sidtab_hash_stats(struct selinux_state *state, char *page);
#endif /* _SELINUX_SECURITY_H_ */
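One note on the new initialized accessors: the intent of the store-release/load-acquire pair is that the policy-load path publishes its state only after everything is written, so any reader that sees true from selinux_initialized() also sees the fully built policy. A hedged sketch of the writer side (the function name is invented; readers use selinux_initialized() as above):

/* Hypothetical writer side of the acquire/release pairing. */
static void example_policy_load_done(struct selinux_state *state)
{
	/* ... all policy data structures fully initialised before this point ... */
	selinux_mark_initialized(state);	/* smp_store_release() publishes the flag */
}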
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 9cb83eeee1d9..15b8c1bcd7d0 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -132,9 +132,9 @@ static void sel_netif_destroy(struct sel_netif *netif)
*/
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
- int ret;
+ int ret = 0;
struct sel_netif *netif;
- struct sel_netif *new = NULL;
+ struct sel_netif *new;
struct net_device *dev;
/* NOTE: we always use init's network namespace since we don't
@@ -151,32 +151,27 @@ static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
netif = sel_netif_find(ns, ifindex);
if (netif != NULL) {
*sid = netif->nsec.sid;
- ret = 0;
goto out;
}
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (new == NULL) {
- ret = -ENOMEM;
- goto out;
- }
- ret = security_netif_sid(&selinux_state, dev->name, &new->nsec.sid);
- if (ret != 0)
- goto out;
- new->nsec.ns = ns;
- new->nsec.ifindex = ifindex;
- ret = sel_netif_insert(new);
+
+ ret = security_netif_sid(&selinux_state, dev->name, sid);
if (ret != 0)
goto out;
- *sid = new->nsec.sid;
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (new) {
+ new->nsec.ns = ns;
+ new->nsec.ifindex = ifindex;
+ new->nsec.sid = *sid;
+ if (sel_netif_insert(new))
+ kfree(new);
+ }
out:
spin_unlock_bh(&sel_netif_lock);
dev_put(dev);
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
__func__, ifindex);
- kfree(new);
- }
return ret;
}
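The rework above changes sel_netif_sid_slow() so that the SID is always resolved via security_netif_sid() first and the cache entry is only added opportunistically: a failed kzalloc() or sel_netif_insert() no longer fails the lookup, it just means the slow path runs again next time. The same pattern is applied to sel_netnode_sid_slow() and sel_netport_sid_slow() below. A self-contained sketch of the idea, using made-up types and a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stdlib.h>

struct cache_entry {
	int key;
	int value;
	struct cache_entry *next;
};

static struct cache_entry *cache_head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the authoritative (and comparatively expensive) computation. */
static int compute_value(int key)
{
	return key * 31 + 7;
}

int cached_lookup(int key)
{
	struct cache_entry *e;
	int value;

	pthread_mutex_lock(&cache_lock);
	for (e = cache_head; e; e = e->next) {
		if (e->key == key) {
			value = e->value;
			pthread_mutex_unlock(&cache_lock);
			return value;
		}
	}

	value = compute_value(key);	/* always produces the answer */

	e = calloc(1, sizeof(*e));	/* cache insert is best effort only */
	if (e) {
		e->key = key;
		e->value = value;
		e->next = cache_head;
		cache_head = e;
	}
	pthread_mutex_unlock(&cache_lock);
	return value;
}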
@@ -271,7 +266,7 @@ static __init int sel_netif_init(void)
{
int i;
- if (!selinux_enabled)
+ if (!selinux_enabled_boot)
return 0;
for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index cae1fcaffd1a..dff587d1e164 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -189,9 +189,9 @@ static void sel_netnode_insert(struct sel_netnode *node)
*/
static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
{
- int ret = -ENOMEM;
+ int ret;
struct sel_netnode *node;
- struct sel_netnode *new = NULL;
+ struct sel_netnode *new;
spin_lock_bh(&sel_netnode_lock);
node = sel_netnode_find(addr, family);
@@ -200,38 +200,36 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
spin_unlock_bh(&sel_netnode_lock);
return 0;
}
+
new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (new == NULL)
- goto out;
switch (family) {
case PF_INET:
ret = security_node_sid(&selinux_state, PF_INET,
addr, sizeof(struct in_addr), sid);
- new->nsec.addr.ipv4 = *(__be32 *)addr;
+ if (new)
+ new->nsec.addr.ipv4 = *(__be32 *)addr;
break;
case PF_INET6:
ret = security_node_sid(&selinux_state, PF_INET6,
addr, sizeof(struct in6_addr), sid);
- new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
+ if (new)
+ new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
default:
BUG();
ret = -EINVAL;
}
- if (ret != 0)
- goto out;
-
- new->nsec.family = family;
- new->nsec.sid = *sid;
- sel_netnode_insert(new);
+ if (ret == 0 && new) {
+ new->nsec.family = family;
+ new->nsec.sid = *sid;
+ sel_netnode_insert(new);
+ } else
+ kfree(new);
-out:
spin_unlock_bh(&sel_netnode_lock);
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network node label\n",
__func__);
- kfree(new);
- }
return ret;
}
@@ -293,7 +291,7 @@ static __init int sel_netnode_init(void)
{
int iter;
- if (!selinux_enabled)
+ if (!selinux_enabled_boot)
return 0;
for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 364b6d5b8968..de727f7489b7 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -137,9 +137,9 @@ static void sel_netport_insert(struct sel_netport *port)
*/
static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
{
- int ret = -ENOMEM;
+ int ret;
struct sel_netport *port;
- struct sel_netport *new = NULL;
+ struct sel_netport *new;
spin_lock_bh(&sel_netport_lock);
port = sel_netport_find(protocol, pnum);
@@ -148,25 +148,23 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
spin_unlock_bh(&sel_netport_lock);
return 0;
}
- new = kzalloc(sizeof(*new), GFP_ATOMIC);
- if (new == NULL)
- goto out;
+
ret = security_port_sid(&selinux_state, protocol, pnum, sid);
if (ret != 0)
goto out;
-
- new->psec.port = pnum;
- new->psec.protocol = protocol;
- new->psec.sid = *sid;
- sel_netport_insert(new);
+ new = kzalloc(sizeof(*new), GFP_ATOMIC);
+ if (new) {
+ new->psec.port = pnum;
+ new->psec.protocol = protocol;
+ new->psec.sid = *sid;
+ sel_netport_insert(new);
+ }
out:
spin_unlock_bh(&sel_netport_lock);
- if (unlikely(ret)) {
+ if (unlikely(ret))
pr_warn("SELinux: failure in %s(), unable to determine network port label\n",
__func__);
- kfree(new);
- }
return ret;
}
@@ -227,7 +225,7 @@ static __init int sel_netport_init(void)
{
int iter;
- if (!selinux_enabled)
+ if (!selinux_enabled_boot)
return 0;
for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) {
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 58345ba0528e..b69231918686 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -83,6 +83,11 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETNEXTHOP, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWLINKPROP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELLINKPROP, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWVLAN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELVLAN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETVLAN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -166,7 +171,7 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
* structures at the top of this file with the new mappings
* before updating the BUILD_BUG_ON() macro!
*/
- BUILD_BUG_ON(RTM_MAX != (RTM_NEWNEXTHOP + 3));
+ BUILD_BUG_ON(RTM_MAX != (RTM_NEWVLAN + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
break;
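The BUILD_BUG_ON() bump above is what ties nlmsg_route_perms[] to the uapi definitions: if RTM_MAX grows again without matching table entries, the build fails instead of silently leaving new message types unmapped. A reduced illustration of the technique, where MSG_MAX, msg_perms and the macro are simplified stand-ins rather than the kernel's names:

#define MSG_MAX 8
#define SIMPLE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

/* One permission value per message type, 0..MSG_MAX-1. */
static const int msg_perms[] = { 1, 2, 3, 4, 5, 6, 7, 8 };

static void check_table_size(void)
{
	/* Compile error here means the table and MSG_MAX drifted apart. */
	SIMPLE_BUILD_BUG_ON(sizeof(msg_perms) / sizeof(msg_perms[0]) != MSG_MAX);
}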
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index e6c7643c3fc0..79c710911a3c 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -168,11 +168,10 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
goto out;
audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_STATUS,
"enforcing=%d old_enforcing=%d auid=%u ses=%u"
- " enabled=%d old-enabled=%d lsm=selinux res=1",
+ " enabled=1 old-enabled=1 lsm=selinux res=1",
new_value, old_value,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
- audit_get_sessionid(current),
- selinux_enabled, selinux_enabled);
+ audit_get_sessionid(current));
enforcing_set(state, new_value);
if (new_value)
avc_ss_reset(state->avc, 0);
@@ -282,6 +281,13 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
int new_value;
int enforcing;
+ /* NOTE: we are now officially considering runtime disable as
+ * deprecated, and using it will become increasingly painful
+ * (e.g. sleeping/blocking) as we progress through future
+ * kernel releases until eventually it is removed
+ */
+ pr_err("SELinux: Runtime disable is deprecated, use selinux=0 on the kernel cmdline.\n");
+
if (count >= PAGE_SIZE)
return -ENOMEM;
@@ -304,10 +310,10 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
goto out;
audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_STATUS,
"enforcing=%d old_enforcing=%d auid=%u ses=%u"
- " enabled=%d old-enabled=%d lsm=selinux res=1",
+ " enabled=0 old-enabled=1 lsm=selinux res=1",
enforcing, enforcing,
from_kuid(&init_user_ns, audit_get_loginuid(current)),
- audit_get_sessionid(current), 0, 1);
+ audit_get_sessionid(current));
}
length = count;
@@ -548,10 +554,6 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
if (*ppos != 0)
goto out;
- length = -EFBIG;
- if (count > 64 * 1024 * 1024)
- goto out;
-
length = -ENOMEM;
data = vmalloc(count);
if (!data)
@@ -1486,6 +1488,32 @@ static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
return length;
}
+static ssize_t sel_read_sidtab_hash_stats(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct selinux_state *state = fsi->state;
+ char *page;
+ ssize_t length;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ length = security_sidtab_hash_stats(state, page);
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos, page,
+ length);
+ free_page((unsigned long)page);
+
+ return length;
+}
+
+static const struct file_operations sel_sidtab_hash_stats_ops = {
+ .read = sel_read_sidtab_hash_stats,
+ .llseek = generic_file_llseek,
+};
+
static const struct file_operations sel_avc_cache_threshold_ops = {
.read = sel_read_avc_cache_threshold,
.write = sel_write_avc_cache_threshold,
@@ -1603,6 +1631,37 @@ static int sel_make_avc_files(struct dentry *dir)
return 0;
}
+static int sel_make_ss_files(struct dentry *dir)
+{
+ struct super_block *sb = dir->d_sb;
+ struct selinux_fs_info *fsi = sb->s_fs_info;
+ int i;
+ static struct tree_descr files[] = {
+ { "sidtab_hash_stats", &sel_sidtab_hash_stats_ops, S_IRUGO },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(files); i++) {
+ struct inode *inode;
+ struct dentry *dentry;
+
+ dentry = d_alloc_name(dir, files[i].name);
+ if (!dentry)
+ return -ENOMEM;
+
+ inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+ if (!inode) {
+ dput(dentry);
+ return -ENOMEM;
+ }
+
+ inode->i_fop = files[i].ops;
+ inode->i_ino = ++fsi->last_ino;
+ d_add(dentry, inode);
+ }
+
+ return 0;
+}
+
static ssize_t sel_read_initcon(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -1676,7 +1735,7 @@ static ssize_t sel_read_class(struct file *file, char __user *buf,
{
unsigned long ino = file_inode(file)->i_ino;
char res[TMPBUFLEN];
- ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+ ssize_t len = scnprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
return simple_read_from_buffer(buf, count, ppos, res, len);
}
@@ -1690,7 +1749,7 @@ static ssize_t sel_read_perm(struct file *file, char __user *buf,
{
unsigned long ino = file_inode(file)->i_ino;
char res[TMPBUFLEN];
- ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+ ssize_t len = scnprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
return simple_read_from_buffer(buf, count, ppos, res, len);
}
@@ -1967,6 +2026,14 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc)
}
ret = sel_make_avc_files(dentry);
+
+ dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ ret = sel_make_ss_files(dentry);
if (ret)
goto err;
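With the new "ss" directory wired into sel_fill_super() above, the sidtab statistics become readable from userspace once an initial policy has been loaded. A trivial reader, assuming selinuxfs is mounted at the usual /sys/fs/selinux location (adjust the path if your system mounts it elsewhere):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/fs/selinux/ss/sidtab_hash_stats", "r");

	if (!f) {
		perror("sidtab_hash_stats");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}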
@@ -2044,7 +2111,7 @@ static int __init init_sel_fs(void)
sizeof(NULL_FILE_NAME)-1);
int err;
- if (!selinux_enabled)
+ if (!selinux_enabled_boot)
return 0;
err = sysfs_create_mount_point(fs_kobj, "selinux");
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 2260c44a568c..3ba044fe02ed 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -31,6 +31,7 @@ struct context {
u32 len; /* length of string in bytes */
struct mls_range range;
char *str; /* string representation if context cannot be mapped. */
+ u32 hash; /* a hash of the string representation */
};
static inline void mls_context_init(struct context *c)
@@ -95,6 +96,38 @@ out:
return rc;
}
+
+static inline int mls_context_glblub(struct context *dst,
+ struct context *c1, struct context *c2)
+{
+ struct mls_range *dr = &dst->range, *r1 = &c1->range, *r2 = &c2->range;
+ int rc = 0;
+
+ if (r1->level[1].sens < r2->level[0].sens ||
+ r2->level[1].sens < r1->level[0].sens)
+ /* These ranges have no common sensitivities */
+ return -EINVAL;
+
+ /* Take the greatest of the low */
+ dr->level[0].sens = max(r1->level[0].sens, r2->level[0].sens);
+
+ /* Take the least of the high */
+ dr->level[1].sens = min(r1->level[1].sens, r2->level[1].sens);
+
+ rc = ebitmap_and(&dr->level[0].cat,
+ &r1->level[0].cat, &r2->level[0].cat);
+ if (rc)
+ goto out;
+
+ rc = ebitmap_and(&dr->level[1].cat,
+ &r1->level[1].cat, &r2->level[1].cat);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
+
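mls_context_glblub() computes the intersection of the two MLS ranges: the new low sensitivity is the higher of the two lows, the new high is the lower of the two highs, and each category set is the intersection of the corresponding inputs; ranges with no overlapping sensitivities are rejected with -EINVAL. For example, with hypothetical labels, combining s1:c1,c2-s3:c1,c2 with s0:c2,c3-s2:c2,c3 yields s1:c2-s2:c2. The DEFAULT_GLBLUB class default handled in mls_compute_sid() below selects this computation.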
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
@@ -136,12 +169,13 @@ static inline int context_cpy(struct context *dst, struct context *src)
kfree(dst->str);
return rc;
}
+ dst->hash = src->hash;
return 0;
}
static inline void context_destroy(struct context *c)
{
- c->user = c->role = c->type = 0;
+ c->user = c->role = c->type = c->hash = 0;
kfree(c->str);
c->str = NULL;
c->len = 0;
@@ -150,6 +184,8 @@ static inline void context_destroy(struct context *c)
static inline int context_cmp(struct context *c1, struct context *c2)
{
+ if (c1->hash && c2->hash && (c1->hash != c2->hash))
+ return 0;
if (c1->len && c2->len)
return (c1->len == c2->len && !strcmp(c1->str, c2->str));
if (c1->len || c2->len)
@@ -160,5 +196,10 @@ static inline int context_cmp(struct context *c1, struct context *c2)
mls_context_cmp(c1, c2));
}
+static inline unsigned int context_compute_hash(const char *s)
+{
+ return full_name_hash(NULL, s, strlen(s));
+}
+
#endif /* _SS_CONTEXT_H_ */
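The new hash field gives context_cmp() a cheap negative fast path: two contexts whose hashes are both known and differ can never be equal, while matching hashes still require the full comparison because of collisions (a hash of 0 is treated as "not yet computed"). A simplified stand-alone sketch of the same pattern:

#include <stdbool.h>
#include <string.h>

struct item {
	unsigned int hash;	/* 0 means the hash has not been computed */
	const char *str;
};

static bool item_equal(const struct item *a, const struct item *b)
{
	/* Differing hashes rule out equality immediately... */
	if (a->hash && b->hash && a->hash != b->hash)
		return false;
	/* ...but equal (or missing) hashes still need the full compare. */
	return strcmp(a->str, b->str) == 0;
}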
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 09929fc5ab47..c8c3663111e2 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -77,6 +77,24 @@ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
return 0;
}
+int ebitmap_and(struct ebitmap *dst, struct ebitmap *e1, struct ebitmap *e2)
+{
+ struct ebitmap_node *n;
+ int bit, rc;
+
+ ebitmap_init(dst);
+
+ ebitmap_for_each_positive_bit(e1, n, bit) {
+ if (ebitmap_get_bit(e2, bit)) {
+ rc = ebitmap_set_bit(dst, bit, 1);
+ if (rc < 0)
+ return rc;
+ }
+ }
+ return 0;
+}
+
+
#ifdef CONFIG_NETLABEL
/**
* ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 6aa7cf6a2197..9a23b81b8832 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -124,6 +124,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
+int ebitmap_and(struct ebitmap *dst, struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 5e05f5b902d7..ec5e3d1da9ac 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -529,6 +529,9 @@ int mls_compute_sid(struct policydb *p,
return mls_context_cpy_high(newcontext, tcontext);
case DEFAULT_TARGET_LOW_HIGH:
return mls_context_cpy(newcontext, tcontext);
+ case DEFAULT_GLBLUB:
+ return mls_context_glblub(newcontext,
+ scontext, tcontext);
}
/* Fallthrough */
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 624ccc6ac744..2aa7f2e1a8e7 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -160,6 +160,11 @@ static struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_GLBLUB,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -177,6 +182,195 @@ static struct policydb_compat_info *policydb_lookup_compat(int version)
}
/*
+ * The following *_destroy functions are used to
+ * free any memory allocated for each kind of
+ * symbol data in the policy database.
+ */
+
+static int perm_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int common_destroy(void *key, void *datum, void *p)
+{
+ struct common_datum *comdatum;
+
+ kfree(key);
+ if (datum) {
+ comdatum = datum;
+ hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(comdatum->permissions.table);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+ if (expr) {
+ ebitmap_destroy(&expr->names);
+ if (expr->type_names) {
+ ebitmap_destroy(&expr->type_names->types);
+ ebitmap_destroy(&expr->type_names->negset);
+ kfree(expr->type_names);
+ }
+ kfree(expr);
+ }
+}
+
+static int cls_destroy(void *key, void *datum, void *p)
+{
+ struct class_datum *cladatum;
+ struct constraint_node *constraint, *ctemp;
+ struct constraint_expr *e, *etmp;
+
+ kfree(key);
+ if (datum) {
+ cladatum = datum;
+ hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(cladatum->permissions.table);
+ constraint = cladatum->constraints;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
+ }
+
+ constraint = cladatum->validatetrans;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
+ }
+ kfree(cladatum->comkey);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int role_destroy(void *key, void *datum, void *p)
+{
+ struct role_datum *role;
+
+ kfree(key);
+ if (datum) {
+ role = datum;
+ ebitmap_destroy(&role->dominates);
+ ebitmap_destroy(&role->types);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int type_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int user_destroy(void *key, void *datum, void *p)
+{
+ struct user_datum *usrdatum;
+
+ kfree(key);
+ if (datum) {
+ usrdatum = datum;
+ ebitmap_destroy(&usrdatum->roles);
+ ebitmap_destroy(&usrdatum->range.level[0].cat);
+ ebitmap_destroy(&usrdatum->range.level[1].cat);
+ ebitmap_destroy(&usrdatum->dfltlevel.cat);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int sens_destroy(void *key, void *datum, void *p)
+{
+ struct level_datum *levdatum;
+
+ kfree(key);
+ if (datum) {
+ levdatum = datum;
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
+ kfree(levdatum->level);
+ }
+ kfree(datum);
+ return 0;
+}
+
+static int cat_destroy(void *key, void *datum, void *p)
+{
+ kfree(key);
+ kfree(datum);
+ return 0;
+}
+
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
+{
+ common_destroy,
+ cls_destroy,
+ role_destroy,
+ type_destroy,
+ user_destroy,
+ cond_destroy_bool,
+ sens_destroy,
+ cat_destroy,
+};
+
+static int filenametr_destroy(void *key, void *datum, void *p)
+{
+ struct filename_trans *ft = key;
+
+ kfree(ft->name);
+ kfree(key);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+ struct mls_range *rt = datum;
+
+ kfree(key);
+ ebitmap_destroy(&rt->level[0].cat);
+ ebitmap_destroy(&rt->level[1].cat);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
+static void ocontext_destroy(struct ocontext *c, int i)
+{
+ if (!c)
+ return;
+
+ context_destroy(&c->context[0]);
+ context_destroy(&c->context[1]);
+ if (i == OCON_ISID || i == OCON_FS ||
+ i == OCON_NETIF || i == OCON_FSUSE)
+ kfree(c->u.name);
+ kfree(c);
+}
+
+/*
* Initialize the role table.
*/
static int roles_init(struct policydb *p)
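The block of *_destroy() helpers above is not new code: the same functions are removed from their original location further down in this patch and only moved ahead of policydb_init(), so that its error path can run the per-symbol destructors via destroy_f[] before calling hashtab_destroy(), which frees the hash nodes and bucket array but not the keys and datums they point to.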
@@ -250,6 +444,7 @@ static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
static u32 rangetr_hash(struct hashtab *h, const void *k)
{
const struct range_trans *key = k;
+
return (key->source_type + (key->target_type << 3) +
(key->target_class << 5)) & (h->size - 1);
}
@@ -299,7 +494,8 @@ static int policydb_init(struct policydb *p)
if (rc)
goto out;
- p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10));
+ p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp,
+ (1 << 10));
if (!p->filename_trans) {
rc = -ENOMEM;
goto out;
@@ -319,8 +515,10 @@ static int policydb_init(struct policydb *p)
out:
hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr);
- for (i = 0; i < SYM_NUM; i++)
+ for (i = 0; i < SYM_NUM; i++) {
+ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table);
+ }
return rc;
}
@@ -395,7 +593,7 @@ static int type_index(void *key, void *datum, void *datap)
|| typdatum->bounds > p->p_types.nprim)
return -EINVAL;
p->sym_val_to_name[SYM_TYPES][typdatum->value - 1] = key;
- p->type_val_to_struct_array[typdatum->value - 1] = typdatum;
+ p->type_val_to_struct[typdatum->value - 1] = typdatum;
}
return 0;
@@ -473,9 +671,9 @@ static void hash_eval(struct hashtab *h, const char *hash_name)
struct hashtab_info info;
hashtab_stat(h, &info);
- pr_debug("SELinux: %s: %d entries and %d/%d buckets used, "
- "longest chain length %d\n", hash_name, h->nel,
- info.slots_used, h->size, info.max_chain_len);
+ pr_debug("SELinux: %s: %d entries and %d/%d buckets used, longest chain length %d\n",
+ hash_name, h->nel, info.slots_used, h->size,
+ info.max_chain_len);
}
static void symtab_hash_eval(struct symtab *s)
@@ -537,10 +735,10 @@ static int policydb_index(struct policydb *p)
if (!p->user_val_to_struct)
return -ENOMEM;
- p->type_val_to_struct_array = kvcalloc(p->p_types.nprim,
- sizeof(*p->type_val_to_struct_array),
- GFP_KERNEL);
- if (!p->type_val_to_struct_array)
+ p->type_val_to_struct = kvcalloc(p->p_types.nprim,
+ sizeof(*p->type_val_to_struct),
+ GFP_KERNEL);
+ if (!p->type_val_to_struct)
return -ENOMEM;
rc = cond_init_bool_indexes(p);
@@ -564,193 +762,6 @@ out:
}
/*
- * The following *_destroy functions are used to
- * free any memory allocated for each kind of
- * symbol data in the policy database.
- */
-
-static int perm_destroy(void *key, void *datum, void *p)
-{
- kfree(key);
- kfree(datum);
- return 0;
-}
-
-static int common_destroy(void *key, void *datum, void *p)
-{
- struct common_datum *comdatum;
-
- kfree(key);
- if (datum) {
- comdatum = datum;
- hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
- hashtab_destroy(comdatum->permissions.table);
- }
- kfree(datum);
- return 0;
-}
-
-static void constraint_expr_destroy(struct constraint_expr *expr)
-{
- if (expr) {
- ebitmap_destroy(&expr->names);
- if (expr->type_names) {
- ebitmap_destroy(&expr->type_names->types);
- ebitmap_destroy(&expr->type_names->negset);
- kfree(expr->type_names);
- }
- kfree(expr);
- }
-}
-
-static int cls_destroy(void *key, void *datum, void *p)
-{
- struct class_datum *cladatum;
- struct constraint_node *constraint, *ctemp;
- struct constraint_expr *e, *etmp;
-
- kfree(key);
- if (datum) {
- cladatum = datum;
- hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
- hashtab_destroy(cladatum->permissions.table);
- constraint = cladatum->constraints;
- while (constraint) {
- e = constraint->expr;
- while (e) {
- etmp = e;
- e = e->next;
- constraint_expr_destroy(etmp);
- }
- ctemp = constraint;
- constraint = constraint->next;
- kfree(ctemp);
- }
-
- constraint = cladatum->validatetrans;
- while (constraint) {
- e = constraint->expr;
- while (e) {
- etmp = e;
- e = e->next;
- constraint_expr_destroy(etmp);
- }
- ctemp = constraint;
- constraint = constraint->next;
- kfree(ctemp);
- }
- kfree(cladatum->comkey);
- }
- kfree(datum);
- return 0;
-}
-
-static int role_destroy(void *key, void *datum, void *p)
-{
- struct role_datum *role;
-
- kfree(key);
- if (datum) {
- role = datum;
- ebitmap_destroy(&role->dominates);
- ebitmap_destroy(&role->types);
- }
- kfree(datum);
- return 0;
-}
-
-static int type_destroy(void *key, void *datum, void *p)
-{
- kfree(key);
- kfree(datum);
- return 0;
-}
-
-static int user_destroy(void *key, void *datum, void *p)
-{
- struct user_datum *usrdatum;
-
- kfree(key);
- if (datum) {
- usrdatum = datum;
- ebitmap_destroy(&usrdatum->roles);
- ebitmap_destroy(&usrdatum->range.level[0].cat);
- ebitmap_destroy(&usrdatum->range.level[1].cat);
- ebitmap_destroy(&usrdatum->dfltlevel.cat);
- }
- kfree(datum);
- return 0;
-}
-
-static int sens_destroy(void *key, void *datum, void *p)
-{
- struct level_datum *levdatum;
-
- kfree(key);
- if (datum) {
- levdatum = datum;
- if (levdatum->level)
- ebitmap_destroy(&levdatum->level->cat);
- kfree(levdatum->level);
- }
- kfree(datum);
- return 0;
-}
-
-static int cat_destroy(void *key, void *datum, void *p)
-{
- kfree(key);
- kfree(datum);
- return 0;
-}
-
-static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
-{
- common_destroy,
- cls_destroy,
- role_destroy,
- type_destroy,
- user_destroy,
- cond_destroy_bool,
- sens_destroy,
- cat_destroy,
-};
-
-static int filenametr_destroy(void *key, void *datum, void *p)
-{
- struct filename_trans *ft = key;
- kfree(ft->name);
- kfree(key);
- kfree(datum);
- cond_resched();
- return 0;
-}
-
-static int range_tr_destroy(void *key, void *datum, void *p)
-{
- struct mls_range *rt = datum;
- kfree(key);
- ebitmap_destroy(&rt->level[0].cat);
- ebitmap_destroy(&rt->level[1].cat);
- kfree(datum);
- cond_resched();
- return 0;
-}
-
-static void ocontext_destroy(struct ocontext *c, int i)
-{
- if (!c)
- return;
-
- context_destroy(&c->context[0]);
- context_destroy(&c->context[1]);
- if (i == OCON_ISID || i == OCON_FS ||
- i == OCON_NETIF || i == OCON_FSUSE)
- kfree(c->u.name);
- kfree(c);
-}
-
-/*
* Free any memory allocated by a policy database structure.
*/
void policydb_destroy(struct policydb *p)
@@ -773,7 +784,7 @@ void policydb_destroy(struct policydb *p)
kfree(p->class_val_to_struct);
kfree(p->role_val_to_struct);
kfree(p->user_val_to_struct);
- kvfree(p->type_val_to_struct_array);
+ kvfree(p->type_val_to_struct);
avtab_destroy(&p->te_avtab);
@@ -867,6 +878,11 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
sidtab_destroy(s);
goto out;
}
+ rc = context_add_hash(p, &c->context[0]);
+ if (rc) {
+ sidtab_destroy(s);
+ goto out;
+ }
rc = sidtab_set_initial(s, c->sid[0], &c->context[0]);
if (rc) {
@@ -1718,7 +1734,7 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
return -EINVAL;
}
- upper = p->type_val_to_struct_array[upper->bounds - 1];
+ upper = p->type_val_to_struct[upper->bounds - 1];
BUG_ON(!upper);
if (upper->attribute) {
@@ -2643,7 +2659,7 @@ static int role_trans_write(struct policydb *p, void *fp)
{
struct role_trans *r = p->role_tr;
struct role_trans *tr;
- u32 buf[3];
+ __le32 buf[3];
size_t nel;
int rc;
@@ -2675,7 +2691,7 @@ static int role_trans_write(struct policydb *p, void *fp)
static int role_allow_write(struct role_allow *r, void *fp)
{
struct role_allow *ra;
- u32 buf[2];
+ __le32 buf[2];
size_t nel;
int rc;
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index fcc6366b447f..69b24191fa38 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -69,6 +69,7 @@ struct class_datum {
#define DEFAULT_TARGET_LOW 4
#define DEFAULT_TARGET_HIGH 5
#define DEFAULT_TARGET_LOW_HIGH 6
+#define DEFAULT_GLBLUB 7
char default_range;
};
@@ -253,7 +254,7 @@ struct policydb {
struct class_datum **class_val_to_struct;
struct role_datum **role_val_to_struct;
struct user_datum **user_val_to_struct;
- struct type_datum **type_val_to_struct_array;
+ struct type_datum **type_val_to_struct;
/* type enforcement access vectors and transitions */
struct avtab te_avtab;
@@ -306,7 +307,7 @@ struct policydb {
u16 process_class;
u32 process_trans_perms;
-};
+} __randomize_layout;
extern void policydb_destroy(struct policydb *p);
extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index d61563a3695e..216ce602a2b5 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -91,6 +91,12 @@ static int context_struct_to_string(struct policydb *policydb,
char **scontext,
u32 *scontext_len);
+static int sidtab_entry_to_string(struct policydb *policydb,
+ struct sidtab *sidtab,
+ struct sidtab_entry *entry,
+ char **scontext,
+ u32 *scontext_len);
+
static void context_struct_compute_av(struct policydb *policydb,
struct context *scontext,
struct context *tcontext,
@@ -542,13 +548,13 @@ static void type_attribute_bounds_av(struct policydb *policydb,
struct type_datum *target;
u32 masked = 0;
- source = policydb->type_val_to_struct_array[scontext->type - 1];
+ source = policydb->type_val_to_struct[scontext->type - 1];
BUG_ON(!source);
if (!source->bounds)
return;
- target = policydb->type_val_to_struct_array[tcontext->type - 1];
+ target = policydb->type_val_to_struct[tcontext->type - 1];
BUG_ON(!target);
memset(&lo_avd, 0, sizeof(lo_avd));
@@ -716,20 +722,21 @@ static void context_struct_compute_av(struct policydb *policydb,
}
static int security_validtrans_handle_fail(struct selinux_state *state,
- struct context *ocontext,
- struct context *ncontext,
- struct context *tcontext,
+ struct sidtab_entry *oentry,
+ struct sidtab_entry *nentry,
+ struct sidtab_entry *tentry,
u16 tclass)
{
struct policydb *p = &state->ss->policydb;
+ struct sidtab *sidtab = state->ss->sidtab;
char *o = NULL, *n = NULL, *t = NULL;
u32 olen, nlen, tlen;
- if (context_struct_to_string(p, ocontext, &o, &olen))
+ if (sidtab_entry_to_string(p, sidtab, oentry, &o, &olen))
goto out;
- if (context_struct_to_string(p, ncontext, &n, &nlen))
+ if (sidtab_entry_to_string(p, sidtab, nentry, &n, &nlen))
goto out;
- if (context_struct_to_string(p, tcontext, &t, &tlen))
+ if (sidtab_entry_to_string(p, sidtab, tentry, &t, &tlen))
goto out;
audit_log(audit_context(), GFP_ATOMIC, AUDIT_SELINUX_ERR,
"op=security_validate_transition seresult=denied"
@@ -751,16 +758,16 @@ static int security_compute_validatetrans(struct selinux_state *state,
{
struct policydb *policydb;
struct sidtab *sidtab;
- struct context *ocontext;
- struct context *ncontext;
- struct context *tcontext;
+ struct sidtab_entry *oentry;
+ struct sidtab_entry *nentry;
+ struct sidtab_entry *tentry;
struct class_datum *tclass_datum;
struct constraint_node *constraint;
u16 tclass;
int rc = 0;
- if (!state->initialized)
+ if (!selinux_initialized(state))
return 0;
read_lock(&state->ss->policy_rwlock);
@@ -779,24 +786,24 @@ static int security_compute_validatetrans(struct selinux_state *state,
}
tclass_datum = policydb->class_val_to_struct[tclass - 1];
- ocontext = sidtab_search(sidtab, oldsid);
- if (!ocontext) {
+ oentry = sidtab_search_entry(sidtab, oldsid);
+ if (!oentry) {
pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, oldsid);
rc = -EINVAL;
goto out;
}
- ncontext = sidtab_search(sidtab, newsid);
- if (!ncontext) {
+ nentry = sidtab_search_entry(sidtab, newsid);
+ if (!nentry) {
pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, newsid);
rc = -EINVAL;
goto out;
}
- tcontext = sidtab_search(sidtab, tasksid);
- if (!tcontext) {
+ tentry = sidtab_search_entry(sidtab, tasksid);
+ if (!tentry) {
pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tasksid);
rc = -EINVAL;
@@ -805,15 +812,16 @@ static int security_compute_validatetrans(struct selinux_state *state,
constraint = tclass_datum->validatetrans;
while (constraint) {
- if (!constraint_expr_eval(policydb, ocontext, ncontext,
- tcontext, constraint->expr)) {
+ if (!constraint_expr_eval(policydb, &oentry->context,
+ &nentry->context, &tentry->context,
+ constraint->expr)) {
if (user)
rc = -EPERM;
else
rc = security_validtrans_handle_fail(state,
- ocontext,
- ncontext,
- tcontext,
+ oentry,
+ nentry,
+ tentry,
tclass);
goto out;
}
@@ -855,12 +863,12 @@ int security_bounded_transition(struct selinux_state *state,
{
struct policydb *policydb;
struct sidtab *sidtab;
- struct context *old_context, *new_context;
+ struct sidtab_entry *old_entry, *new_entry;
struct type_datum *type;
int index;
int rc;
- if (!state->initialized)
+ if (!selinux_initialized(state))
return 0;
read_lock(&state->ss->policy_rwlock);
@@ -869,16 +877,16 @@ int security_bounded_transition(struct selinux_state *state,
sidtab = state->ss->sidtab;
rc = -EINVAL;
- old_context = sidtab_search(sidtab, old_sid);
- if (!old_context) {
+ old_entry = sidtab_search_entry(sidtab, old_sid);
+ if (!old_entry) {
pr_err("SELinux: %s: unrecognized SID %u\n",
__func__, old_sid);
goto out;
}
rc = -EINVAL;
- new_context = sidtab_search(sidtab, new_sid);
- if (!new_context) {
+ new_entry = sidtab_search_entry(sidtab, new_sid);
+ if (!new_entry) {
pr_err("SELinux: %s: unrecognized SID %u\n",
__func__, new_sid);
goto out;
@@ -886,12 +894,12 @@ int security_bounded_transition(struct selinux_state *state,
rc = 0;
/* type/domain unchanged */
- if (old_context->type == new_context->type)
+ if (old_entry->context.type == new_entry->context.type)
goto out;
- index = new_context->type;
+ index = new_entry->context.type;
while (true) {
- type = policydb->type_val_to_struct_array[index - 1];
+ type = policydb->type_val_to_struct[index - 1];
BUG_ON(!type);
/* not bounded anymore */
@@ -901,7 +909,7 @@ int security_bounded_transition(struct selinux_state *state,
/* @newsid is bounded by @oldsid */
rc = 0;
- if (type->bounds == old_context->type)
+ if (type->bounds == old_entry->context.type)
break;
index = type->bounds;
@@ -912,10 +920,10 @@ int security_bounded_transition(struct selinux_state *state,
char *new_name = NULL;
u32 length;
- if (!context_struct_to_string(policydb, old_context,
- &old_name, &length) &&
- !context_struct_to_string(policydb, new_context,
- &new_name, &length)) {
+ if (!sidtab_entry_to_string(policydb, sidtab, old_entry,
+ &old_name, &length) &&
+ !sidtab_entry_to_string(policydb, sidtab, new_entry,
+ &new_name, &length)) {
audit_log(audit_context(),
GFP_ATOMIC, AUDIT_SELINUX_ERR,
"op=security_bounded_transition "
@@ -1019,7 +1027,7 @@ void security_compute_xperms_decision(struct selinux_state *state,
memset(xpermd->dontaudit->p, 0, sizeof(xpermd->dontaudit->p));
read_lock(&state->ss->policy_rwlock);
- if (!state->initialized)
+ if (!selinux_initialized(state))
goto allow;
policydb = &state->ss->policydb;
@@ -1104,7 +1112,7 @@ void security_compute_av(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
avd_init(state, avd);
xperms->len = 0;
- if (!state->initialized)
+ if (!selinux_initialized(state))
goto allow;
policydb = &state->ss->policydb;
@@ -1158,7 +1166,7 @@ void security_compute_av_user(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
avd_init(state, avd);
- if (!state->initialized)
+ if (!selinux_initialized(state))
goto allow;
policydb = &state->ss->policydb;
@@ -1255,8 +1263,42 @@ static int context_struct_to_string(struct policydb *p,
return 0;
}
+static int sidtab_entry_to_string(struct policydb *p,
+ struct sidtab *sidtab,
+ struct sidtab_entry *entry,
+ char **scontext, u32 *scontext_len)
+{
+ int rc = sidtab_sid2str_get(sidtab, entry, scontext, scontext_len);
+
+ if (rc != -ENOENT)
+ return rc;
+
+ rc = context_struct_to_string(p, &entry->context, scontext,
+ scontext_len);
+ if (!rc && scontext)
+ sidtab_sid2str_put(sidtab, entry, *scontext, *scontext_len);
+ return rc;
+}
+
#include "initial_sid_to_string.h"
+int security_sidtab_hash_stats(struct selinux_state *state, char *page)
+{
+ int rc;
+
+ if (!selinux_initialized(state)) {
+ pr_err("SELinux: %s: called before initial load_policy\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ read_lock(&state->ss->policy_rwlock);
+ rc = sidtab_hash_stats(state->ss->sidtab, page);
+ read_unlock(&state->ss->policy_rwlock);
+
+ return rc;
+}
+
const char *security_get_initial_sid_context(u32 sid)
{
if (unlikely(sid > SECINITSID_NUM))
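sidtab_entry_to_string() is why most callers below switch from sidtab_search() to sidtab_search_entry(): it first asks the sidtab for a cached string via sidtab_sid2str_get(), and only on -ENOENT falls back to context_struct_to_string(), handing the freshly built string to sidtab_sid2str_put() so the next audit message for the same SID can skip the conversion.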
@@ -1271,14 +1313,14 @@ static int security_sid_to_context_core(struct selinux_state *state,
{
struct policydb *policydb;
struct sidtab *sidtab;
- struct context *context;
+ struct sidtab_entry *entry;
int rc = 0;
if (scontext)
*scontext = NULL;
*scontext_len = 0;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
if (sid <= SECINITSID_NUM) {
char *scontextp;
@@ -1302,21 +1344,23 @@ static int security_sid_to_context_core(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
sidtab = state->ss->sidtab;
+
if (force)
- context = sidtab_search_force(sidtab, sid);
+ entry = sidtab_search_entry_force(sidtab, sid);
else
- context = sidtab_search(sidtab, sid);
- if (!context) {
+ entry = sidtab_search_entry(sidtab, sid);
+ if (!entry) {
pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, sid);
rc = -EINVAL;
goto out_unlock;
}
- if (only_invalid && !context->len)
- rc = 0;
- else
- rc = context_struct_to_string(policydb, context, scontext,
- scontext_len);
+ if (only_invalid && !entry->context.len)
+ goto out_unlock;
+
+ rc = sidtab_entry_to_string(policydb, sidtab, entry, scontext,
+ scontext_len);
+
out_unlock:
read_unlock(&state->ss->policy_rwlock);
out:
@@ -1449,6 +1493,42 @@ out:
return rc;
}
+int context_add_hash(struct policydb *policydb,
+ struct context *context)
+{
+ int rc;
+ char *str;
+ int len;
+
+ if (context->str) {
+ context->hash = context_compute_hash(context->str);
+ } else {
+ rc = context_struct_to_string(policydb, context,
+ &str, &len);
+ if (rc)
+ return rc;
+ context->hash = context_compute_hash(str);
+ kfree(str);
+ }
+ return 0;
+}
+
+static int context_struct_to_sid(struct selinux_state *state,
+ struct context *context, u32 *sid)
+{
+ int rc;
+ struct sidtab *sidtab = state->ss->sidtab;
+ struct policydb *policydb = &state->ss->policydb;
+
+ if (!context->hash) {
+ rc = context_add_hash(policydb, context);
+ if (rc)
+ return rc;
+ }
+
+ return sidtab_context_to_sid(sidtab, context, sid);
+}
+
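context_struct_to_sid() becomes the common entry point for mapping a context structure to a SID: it lazily computes and memoizes the context hash (a hash of 0 means "not computed yet") before calling into the sidtab, which now keys its reverse-lookup hashtable on that hash. This is also why security_get_user_sids() below must reset usercon.hash to 0 each time it reuses the same context structure with a different user, role, or type.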
static int security_context_to_sid_core(struct selinux_state *state,
const char *scontext, u32 scontext_len,
u32 *sid, u32 def_sid, gfp_t gfp_flags,
@@ -1469,7 +1549,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
if (!scontext2)
return -ENOMEM;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
int i;
for (i = 1; i < SECINITSID_NUM; i++) {
@@ -1501,7 +1581,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
str = NULL;
} else if (rc)
goto out_unlock;
- rc = sidtab_context_to_sid(sidtab, &context, sid);
+ rc = context_struct_to_sid(state, &context, sid);
context_destroy(&context);
out_unlock:
read_unlock(&state->ss->policy_rwlock);
@@ -1574,19 +1654,20 @@ int security_context_to_sid_force(struct selinux_state *state,
static int compute_sid_handle_invalid_context(
struct selinux_state *state,
- struct context *scontext,
- struct context *tcontext,
+ struct sidtab_entry *sentry,
+ struct sidtab_entry *tentry,
u16 tclass,
struct context *newcontext)
{
struct policydb *policydb = &state->ss->policydb;
+ struct sidtab *sidtab = state->ss->sidtab;
char *s = NULL, *t = NULL, *n = NULL;
u32 slen, tlen, nlen;
struct audit_buffer *ab;
- if (context_struct_to_string(policydb, scontext, &s, &slen))
+ if (sidtab_entry_to_string(policydb, sidtab, sentry, &s, &slen))
goto out;
- if (context_struct_to_string(policydb, tcontext, &t, &tlen))
+ if (sidtab_entry_to_string(policydb, sidtab, tentry, &t, &tlen))
goto out;
if (context_struct_to_string(policydb, newcontext, &n, &nlen))
goto out;
@@ -1645,7 +1726,8 @@ static int security_compute_sid(struct selinux_state *state,
struct policydb *policydb;
struct sidtab *sidtab;
struct class_datum *cladatum = NULL;
- struct context *scontext = NULL, *tcontext = NULL, newcontext;
+ struct context *scontext, *tcontext, newcontext;
+ struct sidtab_entry *sentry, *tentry;
struct role_trans *roletr = NULL;
struct avtab_key avkey;
struct avtab_datum *avdatum;
@@ -1654,7 +1736,7 @@ static int security_compute_sid(struct selinux_state *state,
int rc = 0;
bool sock;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
switch (orig_tclass) {
case SECCLASS_PROCESS: /* kernel value */
*out_sid = ssid;
@@ -1682,21 +1764,24 @@ static int security_compute_sid(struct selinux_state *state,
policydb = &state->ss->policydb;
sidtab = state->ss->sidtab;
- scontext = sidtab_search(sidtab, ssid);
- if (!scontext) {
+ sentry = sidtab_search_entry(sidtab, ssid);
+ if (!sentry) {
pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, ssid);
rc = -EINVAL;
goto out_unlock;
}
- tcontext = sidtab_search(sidtab, tsid);
- if (!tcontext) {
+ tentry = sidtab_search_entry(sidtab, tsid);
+ if (!tentry) {
pr_err("SELinux: %s: unrecognized SID %d\n",
__func__, tsid);
rc = -EINVAL;
goto out_unlock;
}
+ scontext = &sentry->context;
+ tcontext = &tentry->context;
+
if (tclass && tclass <= policydb->p_classes.nprim)
cladatum = policydb->class_val_to_struct[tclass - 1];
@@ -1797,15 +1882,13 @@ static int security_compute_sid(struct selinux_state *state,
/* Check the validity of the context. */
if (!policydb_context_isvalid(policydb, &newcontext)) {
- rc = compute_sid_handle_invalid_context(state, scontext,
- tcontext,
- tclass,
- &newcontext);
+ rc = compute_sid_handle_invalid_context(state, sentry, tentry,
+ tclass, &newcontext);
if (rc)
goto out_unlock;
}
/* Obtain the sid for the context. */
- rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+ rc = context_struct_to_sid(state, &newcontext, out_sid);
out_unlock:
read_unlock(&state->ss->policy_rwlock);
context_destroy(&newcontext);
@@ -1946,10 +2029,18 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
rc = string_to_context_struct(args->newp, NULL, s,
newc, SECSID_NULL);
if (rc == -EINVAL) {
- /* Retain string representation for later mapping. */
+ /*
+ * Retain string representation for later mapping.
+ *
+ * IMPORTANT: We need to copy the contents of oldc->str
+ * back into s again because string_to_context_struct()
+ * may have garbled it.
+ */
+ memcpy(s, oldc->str, oldc->len);
context_init(newc);
newc->str = s;
newc->len = oldc->len;
+ newc->hash = oldc->hash;
return 0;
}
kfree(s);
@@ -2026,6 +2117,10 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
goto bad;
}
+ rc = context_add_hash(args->newp, newc);
+ if (rc)
+ goto bad;
+
return 0;
bad:
/* Map old representation to string and save it. */
@@ -2035,6 +2130,7 @@ bad:
context_destroy(newc);
newc->str = s;
newc->len = len;
+ newc->hash = context_compute_hash(s);
pr_info("SELinux: Context %s became invalid (unmapped).\n",
newc->str);
return 0;
@@ -2087,26 +2183,17 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
int rc = 0;
struct policy_file file = { data, len }, *fp = &file;
- oldpolicydb = kcalloc(2, sizeof(*oldpolicydb), GFP_KERNEL);
- if (!oldpolicydb) {
- rc = -ENOMEM;
- goto out;
- }
- newpolicydb = oldpolicydb + 1;
-
policydb = &state->ss->policydb;
newsidtab = kmalloc(sizeof(*newsidtab), GFP_KERNEL);
- if (!newsidtab) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!newsidtab)
+ return -ENOMEM;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
rc = policydb_read(policydb, fp);
if (rc) {
kfree(newsidtab);
- goto out;
+ return rc;
}
policydb->len = len;
@@ -2115,19 +2202,19 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
if (rc) {
kfree(newsidtab);
policydb_destroy(policydb);
- goto out;
+ return rc;
}
rc = policydb_load_isids(policydb, newsidtab);
if (rc) {
kfree(newsidtab);
policydb_destroy(policydb);
- goto out;
+ return rc;
}
state->ss->sidtab = newsidtab;
security_load_policycaps(state);
- state->initialized = 1;
+ selinux_mark_initialized(state);
seqno = ++state->ss->latest_granting;
selinux_complete_init();
avc_ss_reset(state->avc, seqno);
@@ -2135,9 +2222,16 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
selinux_status_update_policyload(state, seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
- goto out;
+ return 0;
}
+ oldpolicydb = kcalloc(2, sizeof(*oldpolicydb), GFP_KERNEL);
+ if (!oldpolicydb) {
+ kfree(newsidtab);
+ return -ENOMEM;
+ }
+ newpolicydb = oldpolicydb + 1;
+
rc = policydb_read(newpolicydb, fp);
if (rc) {
kfree(newsidtab);
@@ -2253,14 +2347,12 @@ int security_port_sid(struct selinux_state *state,
u8 protocol, u16 port, u32 *out_sid)
{
struct policydb *policydb;
- struct sidtab *sidtab;
struct ocontext *c;
int rc = 0;
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_PORT];
while (c) {
@@ -2273,8 +2365,7 @@ int security_port_sid(struct selinux_state *state,
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(sidtab,
- &c->context[0],
+ rc = context_struct_to_sid(state, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
@@ -2299,14 +2390,12 @@ int security_ib_pkey_sid(struct selinux_state *state,
u64 subnet_prefix, u16 pkey_num, u32 *out_sid)
{
struct policydb *policydb;
- struct sidtab *sidtab;
struct ocontext *c;
int rc = 0;
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_IBPKEY];
while (c) {
@@ -2320,7 +2409,7 @@ int security_ib_pkey_sid(struct selinux_state *state,
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(sidtab,
+ rc = context_struct_to_sid(state,
&c->context[0],
&c->sid[0]);
if (rc)
@@ -2345,14 +2434,12 @@ int security_ib_endport_sid(struct selinux_state *state,
const char *dev_name, u8 port_num, u32 *out_sid)
{
struct policydb *policydb;
- struct sidtab *sidtab;
struct ocontext *c;
int rc = 0;
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_IBENDPORT];
while (c) {
@@ -2367,8 +2454,7 @@ int security_ib_endport_sid(struct selinux_state *state,
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(sidtab,
- &c->context[0],
+ rc = context_struct_to_sid(state, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
@@ -2391,14 +2477,12 @@ int security_netif_sid(struct selinux_state *state,
char *name, u32 *if_sid)
{
struct policydb *policydb;
- struct sidtab *sidtab;
int rc = 0;
struct ocontext *c;
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_NETIF];
while (c) {
@@ -2409,13 +2493,11 @@ int security_netif_sid(struct selinux_state *state,
if (c) {
if (!c->sid[0] || !c->sid[1]) {
- rc = sidtab_context_to_sid(sidtab,
- &c->context[0],
- &c->sid[0]);
+ rc = context_struct_to_sid(state, &c->context[0],
+ &c->sid[0]);
if (rc)
goto out;
- rc = sidtab_context_to_sid(sidtab,
- &c->context[1],
+ rc = context_struct_to_sid(state, &c->context[1],
&c->sid[1]);
if (rc)
goto out;
@@ -2456,14 +2538,12 @@ int security_node_sid(struct selinux_state *state,
u32 *out_sid)
{
struct policydb *policydb;
- struct sidtab *sidtab;
int rc;
struct ocontext *c;
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = state->ss->sidtab;
switch (domain) {
case AF_INET: {
@@ -2505,7 +2585,7 @@ int security_node_sid(struct selinux_state *state,
if (c) {
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(sidtab,
+ rc = context_struct_to_sid(state,
&c->context[0],
&c->sid[0]);
if (rc)
@@ -2557,7 +2637,7 @@ int security_get_user_sids(struct selinux_state *state,
*sids = NULL;
*nel = 0;
- if (!state->initialized)
+ if (!selinux_initialized(state))
goto out;
read_lock(&state->ss->policy_rwlock);
@@ -2589,12 +2669,17 @@ int security_get_user_sids(struct selinux_state *state,
usercon.role = i + 1;
ebitmap_for_each_positive_bit(&role->types, tnode, j) {
usercon.type = j + 1;
+ /*
+ * The same context struct is reused here so the hash
+ * must be reset.
+ */
+ usercon.hash = 0;
if (mls_setup_user_range(policydb, fromcon, user,
&usercon))
continue;
- rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+ rc = context_struct_to_sid(state, &usercon, &sid);
if (rc)
goto out_unlock;
if (mynel < maxnel) {
@@ -2665,7 +2750,6 @@ static inline int __security_genfs_sid(struct selinux_state *state,
u32 *sid)
{
struct policydb *policydb = &state->ss->policydb;
- struct sidtab *sidtab = state->ss->sidtab;
int len;
u16 sclass;
struct genfs *genfs;
@@ -2700,7 +2784,7 @@ static inline int __security_genfs_sid(struct selinux_state *state,
goto out;
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(sidtab, &c->context[0], &c->sid[0]);
+ rc = context_struct_to_sid(state, &c->context[0], &c->sid[0]);
if (rc)
goto out;
}
@@ -2742,7 +2826,6 @@ int security_genfs_sid(struct selinux_state *state,
int security_fs_use(struct selinux_state *state, struct super_block *sb)
{
struct policydb *policydb;
- struct sidtab *sidtab;
int rc = 0;
struct ocontext *c;
struct superblock_security_struct *sbsec = sb->s_security;
@@ -2751,7 +2834,6 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_FSUSE];
while (c) {
@@ -2763,7 +2845,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
if (c) {
sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(sidtab, &c->context[0],
+ rc = context_struct_to_sid(state, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
@@ -2791,7 +2873,7 @@ int security_get_bools(struct selinux_state *state,
struct policydb *policydb;
int i, rc;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
*len = 0;
*names = NULL;
*values = NULL;
@@ -2966,7 +3048,7 @@ int security_sid_mls_copy(struct selinux_state *state,
int rc;
rc = 0;
- if (!state->initialized || !policydb->mls_enabled) {
+ if (!selinux_initialized(state) || !policydb->mls_enabled) {
*new_sid = sid;
goto out;
}
@@ -3019,8 +3101,7 @@ int security_sid_mls_copy(struct selinux_state *state,
goto out_unlock;
}
}
-
- rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+ rc = context_struct_to_sid(state, &newcon, new_sid);
out_unlock:
read_unlock(&state->ss->policy_rwlock);
context_destroy(&newcon);
@@ -3134,7 +3215,7 @@ int security_get_classes(struct selinux_state *state,
struct policydb *policydb = &state->ss->policydb;
int rc;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
*nclasses = 0;
*classes = NULL;
return 0;
@@ -3283,7 +3364,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
*rule = NULL;
- if (!state->initialized)
+ if (!selinux_initialized(state))
return -EOPNOTSUPP;
switch (field) {
@@ -3582,7 +3663,7 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
struct context *ctx;
struct context ctx_new;
- if (!state->initialized) {
+ if (!selinux_initialized(state)) {
*sid = SECSID_NULL;
return 0;
}
@@ -3613,7 +3694,7 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
if (!mls_context_isvalid(policydb, &ctx_new))
goto out_free;
- rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+ rc = context_struct_to_sid(state, &ctx_new, sid);
if (rc)
goto out_free;
@@ -3649,7 +3730,7 @@ int security_netlbl_sid_to_secattr(struct selinux_state *state,
int rc;
struct context *ctx;
- if (!state->initialized)
+ if (!selinux_initialized(state))
return 0;
read_lock(&state->ss->policy_rwlock);
@@ -3688,7 +3769,7 @@ int security_read_policy(struct selinux_state *state,
int rc;
struct policy_file fp;
- if (!state->initialized)
+ if (!selinux_initialized(state))
return -EINVAL;
*len = security_policydb_len(state);
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index 9a36de860368..c5896f39e8f6 100644
--- a/security/selinux/ss/services.h
+++ b/security/selinux/ss/services.h
@@ -8,7 +8,7 @@
#define _SS_SERVICES_H_
#include "policydb.h"
-#include "sidtab.h"
+#include "context.h"
/* Mapping for a single class */
struct selinux_mapping {
@@ -31,7 +31,7 @@ struct selinux_ss {
struct selinux_map map;
struct page *status_page;
struct mutex status_lock;
-};
+} __randomize_layout;
void services_compute_xperms_drivers(struct extended_perms *xperms,
struct avtab_node *node);
@@ -39,4 +39,6 @@ void services_compute_xperms_drivers(struct extended_perms *xperms,
void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
struct avtab_node *node);
+int context_add_hash(struct policydb *policydb, struct context *context);
+
#endif /* _SS_SERVICES_H_ */
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e63a90ff2728..a308ce1e6a13 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -9,52 +9,135 @@
*/
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
+struct sidtab_str_cache {
+ struct rcu_head rcu_member;
+ struct list_head lru_member;
+ struct sidtab_entry *parent;
+ u32 len;
+ char str[];
+};
+
+#define index_to_sid(index) (index + SECINITSID_NUM + 1)
+#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
+
int sidtab_init(struct sidtab *s)
{
u32 i;
memset(s->roots, 0, sizeof(s->roots));
- for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
- atomic_set(&s->rcache[i], -1);
-
for (i = 0; i < SECINITSID_NUM; i++)
s->isids[i].set = 0;
- atomic_set(&s->count, 0);
-
+ s->count = 0;
s->convert = NULL;
+ hash_init(s->context_to_sid);
spin_lock_init(&s->lock);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
+ INIT_LIST_HEAD(&s->cache_lru_list);
+ spin_lock_init(&s->cache_lock);
+#endif
+
return 0;
}
+static u32 context_to_sid(struct sidtab *s, struct context *context)
+{
+ struct sidtab_entry *entry;
+ u32 sid = 0;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(s->context_to_sid, entry, list,
+ context->hash) {
+ if (context_cmp(&entry->context, context)) {
+ sid = entry->sid;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return sid;
+}
+
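context_to_sid() above is the lock-free reverse lookup: entries are chained into the context_to_sid hashtable keyed by the precomputed context hash, readers walk a single bucket under rcu_read_lock(), and context_cmp() is still performed to guard against hash collisions (insertion happens later, under the sidtab spinlock). A reduced kernel-style sketch of that read side, with illustrative names and without the surrounding sidtab machinery:

#include <linux/hashtable.h>
#include <linux/rcupdate.h>

struct demo_entry {
	u32 key_hash;		/* precomputed hash of the real key */
	u32 value;
	struct hlist_node list;
};

static DEFINE_HASHTABLE(demo_table, 9);	/* 2^9 buckets */

static u32 demo_lookup(u32 key_hash)
{
	struct demo_entry *e;
	u32 value = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(demo_table, e, list, key_hash) {
		/* The real code re-checks the full key (context_cmp()) here. */
		if (e->key_hash == key_hash) {
			value = e->value;
			break;
		}
	}
	rcu_read_unlock();
	return value;
}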
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
- struct sidtab_isid_entry *entry;
+ struct sidtab_isid_entry *isid;
int rc;
if (sid == 0 || sid > SECINITSID_NUM)
return -EINVAL;
- entry = &s->isids[sid - 1];
+ isid = &s->isids[sid - 1];
- rc = context_cpy(&entry->context, context);
+ rc = context_cpy(&isid->entry.context, context);
if (rc)
return rc;
- entry->set = 1;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ isid->entry.cache = NULL;
+#endif
+ isid->set = 1;
+
+ /*
+ * Multiple initial sids may map to the same context. Check that this
+ * context is not already represented in the context_to_sid hashtable
+ * to avoid duplicate entries and long linked lists upon hash
+ * collision.
+ */
+ if (!context_to_sid(s, context)) {
+ isid->entry.sid = sid;
+ hash_add(s->context_to_sid, &isid->entry.list, context->hash);
+ }
+
return 0;
}
+int sidtab_hash_stats(struct sidtab *sidtab, char *page)
+{
+ int i;
+ int chain_len = 0;
+ int slots_used = 0;
+ int entries = 0;
+ int max_chain_len = 0;
+ int cur_bucket = 0;
+ struct sidtab_entry *entry;
+
+ rcu_read_lock();
+ hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
+ entries++;
+ if (i == cur_bucket) {
+ chain_len++;
+ if (chain_len == 1)
+ slots_used++;
+ } else {
+ cur_bucket = i;
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+ chain_len = 0;
+ }
+ }
+ rcu_read_unlock();
+
+ if (chain_len > max_chain_len)
+ max_chain_len = chain_len;
+
+ return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
+ "longest chain: %d\n", entries,
+ slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
+}
+
static u32 sidtab_level_from_count(u32 count)
{
u32 capacity = SIDTAB_LEAF_ENTRIES;
@@ -88,7 +171,8 @@ static int sidtab_alloc_roots(struct sidtab *s, u32 level)
return 0;
}
-static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
+static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
+ int alloc)
{
union sidtab_entry_inner *entry;
u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
@@ -125,166 +209,81 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
if (!entry->ptr_leaf)
return NULL;
}
- return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
+ return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
-static struct context *sidtab_lookup(struct sidtab *s, u32 index)
+static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
- u32 count = (u32)atomic_read(&s->count);
+ /* read entries only after reading count */
+ u32 count = smp_load_acquire(&s->count);
if (index >= count)
return NULL;
- /* read entries after reading count */
- smp_rmb();
-
return sidtab_do_lookup(s, index, 0);
}
-static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
- return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
+ return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}
-static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
+ int force)
{
- struct context *context;
-
if (sid != 0) {
+ struct sidtab_entry *entry;
+
if (sid > SECINITSID_NUM)
- context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
+ entry = sidtab_lookup(s, sid_to_index(sid));
else
- context = sidtab_lookup_initial(s, sid);
- if (context && (!context->len || force))
- return context;
+ entry = sidtab_lookup_initial(s, sid);
+ if (entry && (!entry->context.len || force))
+ return entry;
}
return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
-struct context *sidtab_search(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 0);
}
-struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 1);
}
-static int sidtab_find_context(union sidtab_entry_inner entry,
- u32 *pos, u32 count, u32 level,
- struct context *context, u32 *index)
-{
- int rc;
- u32 i;
-
- if (level != 0) {
- struct sidtab_node_inner *node = entry.ptr_inner;
-
- i = 0;
- while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
- rc = sidtab_find_context(node->entries[i],
- pos, count, level - 1,
- context, index);
- if (rc == 0)
- return 0;
- i++;
- }
- } else {
- struct sidtab_node_leaf *node = entry.ptr_leaf;
-
- i = 0;
- while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
- if (context_cmp(&node->entries[i].context, context)) {
- *index = *pos;
- return 0;
- }
- (*pos)++;
- i++;
- }
- }
- return -ENOENT;
-}
-
-static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
-{
- while (pos > 0) {
- atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1]));
- --pos;
- }
- atomic_set(&s->rcache[0], (int)index);
-}
-
-static void sidtab_rcache_push(struct sidtab *s, u32 index)
-{
- sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
-}
-
-static int sidtab_rcache_search(struct sidtab *s, struct context *context,
- u32 *index)
-{
- u32 i;
-
- for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
- int v = atomic_read(&s->rcache[i]);
-
- if (v < 0)
- continue;
-
- if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) {
- sidtab_rcache_update(s, (u32)v, i);
- *index = (u32)v;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
- u32 *index)
+int sidtab_context_to_sid(struct sidtab *s, struct context *context,
+ u32 *sid)
{
unsigned long flags;
- u32 count = (u32)atomic_read(&s->count);
- u32 count_locked, level, pos;
+ u32 count;
struct sidtab_convert_params *convert;
- struct context *dst, *dst_convert;
+ struct sidtab_entry *dst, *dst_convert;
int rc;
- rc = sidtab_rcache_search(s, context, index);
- if (rc == 0)
- return 0;
-
- level = sidtab_level_from_count(count);
-
- /* read entries after reading count */
- smp_rmb();
-
- pos = 0;
- rc = sidtab_find_context(s->roots[level], &pos, count, level,
- context, index);
- if (rc == 0) {
- sidtab_rcache_push(s, *index);
+ *sid = context_to_sid(s, context);
+ if (*sid)
return 0;
- }
/* lock-free search failed: lock, re-search, and insert if not found */
spin_lock_irqsave(&s->lock, flags);
+ rc = 0;
+ *sid = context_to_sid(s, context);
+ if (*sid)
+ goto out_unlock;
+
+ /* read entries only after reading count */
+ count = smp_load_acquire(&s->count);
convert = s->convert;
- count_locked = (u32)atomic_read(&s->count);
- level = sidtab_level_from_count(count_locked);
-
- /* if count has changed before we acquired the lock, then catch up */
- while (count < count_locked) {
- if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
- sidtab_rcache_push(s, count);
- *index = count;
- rc = 0;
- goto out_unlock;
- }
- ++count;
- }
+
+ /* bail out if we already reached max entries */
+ rc = -EOVERFLOW;
+ if (count >= SIDTAB_MAX)
+ goto out_unlock;
/* insert context into new entry */
rc = -ENOMEM;
@@ -292,7 +291,9 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
if (!dst)
goto out_unlock;
- rc = context_cpy(dst, context);
+ dst->sid = index_to_sid(count);
+
+ rc = context_cpy(&dst->context, context);
if (rc)
goto out_unlock;
@@ -304,31 +305,32 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
rc = -ENOMEM;
dst_convert = sidtab_do_lookup(convert->target, count, 1);
if (!dst_convert) {
- context_destroy(dst);
+ context_destroy(&dst->context);
goto out_unlock;
}
- rc = convert->func(context, dst_convert, convert->args);
+ rc = convert->func(context, &dst_convert->context,
+ convert->args);
if (rc) {
- context_destroy(dst);
+ context_destroy(&dst->context);
goto out_unlock;
}
+ dst_convert->sid = index_to_sid(count);
+ convert->target->count = count + 1;
- /* at this point we know the insert won't fail */
- atomic_set(&convert->target->count, count + 1);
+ hash_add_rcu(convert->target->context_to_sid,
+ &dst_convert->list, dst_convert->context.hash);
}
if (context->len)
pr_info("SELinux: Context %s is not valid (left unmapped).\n",
context->str);
- sidtab_rcache_push(s, count);
- *index = count;
-
- /* write entries before writing new count */
- smp_wmb();
+ *sid = index_to_sid(count);
- atomic_set(&s->count, count + 1);
+ /* write entries before updating count */
+ smp_store_release(&s->count, count + 1);
+ hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);
rc = 0;
out_unlock:
@@ -336,25 +338,19 @@ out_unlock:
return rc;
}
-int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
+static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
- int rc;
+ struct sidtab_entry *entry;
u32 i;
- for (i = 0; i < SECINITSID_NUM; i++) {
- struct sidtab_isid_entry *entry = &s->isids[i];
+ for (i = 0; i < count; i++) {
+ entry = sidtab_do_lookup(s, i, 0);
+ entry->sid = index_to_sid(i);
- if (entry->set && context_cmp(context, &entry->context)) {
- *sid = i + 1;
- return 0;
- }
- }
+ hash_add_rcu(s->context_to_sid, &entry->list,
+ entry->context.hash);
- rc = sidtab_reverse_lookup(s, context, sid);
- if (rc)
- return rc;
- *sid += SECINITSID_NUM + 1;
- return 0;
+ }
}
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
@@ -418,7 +414,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
return -EBUSY;
}
- count = (u32)atomic_read(&s->count);
+ count = s->count;
level = sidtab_level_from_count(count);
/* allocate last leaf in the new sidtab (to avoid race with
@@ -431,12 +427,12 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
}
/* set count in case no new entries are added during conversion */
- atomic_set(&params->target->count, count);
+ params->target->count = count;
/* enable live convert of new entries */
s->convert = params;
- /* we can safely do the rest of the conversion outside the lock */
+ /* we can safely convert the tree outside the lock */
spin_unlock_irqrestore(&s->lock, flags);
pr_info("SELinux: Converting %u SID table entries...\n", count);
@@ -450,8 +446,25 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
spin_lock_irqsave(&s->lock, flags);
s->convert = NULL;
spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
}
- return rc;
+ /*
+ * The hashtable can also be modified in sidtab_context_to_sid()
+ * so we must re-acquire the lock here.
+ */
+ spin_lock_irqsave(&s->lock, flags);
+ sidtab_convert_hashtable(params->target, count);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ return 0;
+}
+
+static void sidtab_destroy_entry(struct sidtab_entry *entry)
+{
+ context_destroy(&entry->context);
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ kfree(rcu_dereference_raw(entry->cache));
+#endif
}
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
@@ -474,7 +487,7 @@ static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
return;
for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
- context_destroy(&node->entries[i].context);
+ sidtab_destroy_entry(&node->entries[i]);
kfree(node);
}
}
@@ -485,11 +498,101 @@ void sidtab_destroy(struct sidtab *s)
for (i = 0; i < SECINITSID_NUM; i++)
if (s->isids[i].set)
- context_destroy(&s->isids[i].context);
+ sidtab_destroy_entry(&s->isids[i].entry);
level = SIDTAB_MAX_LEVEL;
while (level && !s->roots[level].ptr_inner)
--level;
sidtab_destroy_tree(s->roots[level], level);
+ /*
+ * The context_to_sid hashtable's objects are all shared
+ * with the isids array and context tree, and so don't need
+ * to be cleaned up here.
+ */
+}
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+ const char *str, u32 str_len)
+{
+ struct sidtab_str_cache *cache, *victim = NULL;
+
+ /* do not cache invalid contexts */
+ if (entry->context.len)
+ return;
+
+ /*
+ * Skip the put operation when in non-task context to avoid the need
+ * to disable interrupts while holding s->cache_lock.
+ */
+ if (!in_task())
+ return;
+
+ spin_lock(&s->cache_lock);
+
+ cache = rcu_dereference_protected(entry->cache,
+ lockdep_is_held(&s->cache_lock));
+ if (cache) {
+ /* entry in cache - just bump to the head of LRU list */
+ list_move(&cache->lru_member, &s->cache_lru_list);
+ goto out_unlock;
+ }
+
+ cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
+ if (!cache)
+ goto out_unlock;
+
+ if (s->cache_free_slots == 0) {
+ /* pop a cache entry from the tail and free it */
+ victim = container_of(s->cache_lru_list.prev,
+ struct sidtab_str_cache, lru_member);
+ list_del(&victim->lru_member);
+ rcu_assign_pointer(victim->parent->cache, NULL);
+ } else {
+ s->cache_free_slots--;
+ }
+ cache->parent = entry;
+ cache->len = str_len;
+ memcpy(cache->str, str, str_len);
+ list_add(&cache->lru_member, &s->cache_lru_list);
+
+ rcu_assign_pointer(entry->cache, cache);
+
+out_unlock:
+ spin_unlock(&s->cache_lock);
+ kfree_rcu(victim, rcu_member);
}
+
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+ char **out, u32 *out_len)
+{
+ struct sidtab_str_cache *cache;
+ int rc = 0;
+
+ if (entry->context.len)
+ return -ENOENT; /* do not cache invalid contexts */
+
+ rcu_read_lock();
+
+ cache = rcu_dereference(entry->cache);
+ if (!cache) {
+ rc = -ENOENT;
+ } else {
+ *out_len = cache->len;
+ if (out) {
+ *out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
+ if (!*out)
+ rc = -ENOMEM;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!rc && out)
+ sidtab_sid2str_put(s, entry, *out, *out_len);
+ return rc;
+}
+
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
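Illustrative note (not part of the patch): the sidtab.c hunks above replace the old linear reverse lookup (sidtab_find_context() plus the small rcache) with a hashtable keyed by the context hash. A lock-free lookup is tried first; only on a miss is the spinlock taken, the lookup repeated, and a new entry inserted and published. The following is a minimal userspace sketch of that lookup/insert control flow only. All names here (toy_table, toy_lookup, toy_to_id) are invented for the sketch, error handling is omitted, and the real code relies on hash_add_rcu(), spin_lock_irqsave() and smp_store_release() rather than a plain mutex, so this toy is not itself safe for concurrent lock-free readers.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_BUCKETS 64

struct toy_entry {
	unsigned int id;
	char *key;
	struct toy_entry *next;		/* hash chain */
};

struct toy_table {
	struct toy_entry *buckets[TOY_BUCKETS];
	unsigned int count;		/* next id to hand out */
	pthread_mutex_t lock;		/* serializes inserts only */
};

static unsigned int toy_hash(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h % TOY_BUCKETS;
}

/* Walk the chain for key; the kernel version does this under RCU. */
static struct toy_entry *toy_lookup(struct toy_table *t, const char *key)
{
	struct toy_entry *e;

	for (e = t->buckets[toy_hash(key)]; e; e = e->next)
		if (!strcmp(e->key, key))
			return e;
	return NULL;
}

/* Map a key to a stable id, inserting it on first use. */
static unsigned int toy_to_id(struct toy_table *t, const char *key)
{
	struct toy_entry *e = toy_lookup(t, key);	/* fast path, no lock */

	if (e)
		return e->id;

	pthread_mutex_lock(&t->lock);
	e = toy_lookup(t, key);		/* re-check: another inserter may have won */
	if (!e) {
		unsigned int b = toy_hash(key);

		e = calloc(1, sizeof(*e));
		e->key = strdup(key);
		e->id = t->count++;
		e->next = t->buckets[b];
		t->buckets[b] = e;	/* publish only after the entry is filled in */
	}
	pthread_mutex_unlock(&t->lock);
	return e->id;
}

int main(void)
{
	struct toy_table t = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("%u %u %u\n",
	       toy_to_id(&t, "user_u:user_r:user_t:s0"),
	       toy_to_id(&t, "system_u:object_r:etc_t:s0"),
	       toy_to_id(&t, "user_u:user_r:user_t:s0"));	/* prints "0 1 0" */
	return 0;
}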
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index bbd5c0d1f3bd..3311d9f236c0 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -13,16 +13,19 @@
#include <linux/spinlock_types.h>
#include <linux/log2.h>
+#include <linux/hashtable.h>
#include "context.h"
-struct sidtab_entry_leaf {
+struct sidtab_entry {
+ u32 sid;
struct context context;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ struct sidtab_str_cache __rcu *cache;
+#endif
+ struct hlist_node list;
};
-struct sidtab_node_inner;
-struct sidtab_node_leaf;
-
union sidtab_entry_inner {
struct sidtab_node_inner *ptr_inner;
struct sidtab_node_leaf *ptr_leaf;
@@ -38,17 +41,17 @@ union sidtab_entry_inner {
(SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
#define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
#define SIDTAB_LEAF_ENTRIES \
- (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
+ (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry))
-#define SIDTAB_MAX_BITS 31 /* limited to INT_MAX due to atomic_t range */
-#define SIDTAB_MAX (((u32)1 << SIDTAB_MAX_BITS) - 1)
+#define SIDTAB_MAX_BITS 32
+#define SIDTAB_MAX U32_MAX
/* ensure enough tree levels for SIDTAB_MAX entries */
#define SIDTAB_MAX_LEVEL \
DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
SIDTAB_INNER_SHIFT)
struct sidtab_node_leaf {
- struct sidtab_entry_leaf entries[SIDTAB_LEAF_ENTRIES];
+ struct sidtab_entry entries[SIDTAB_LEAF_ENTRIES];
};
struct sidtab_node_inner {
@@ -57,7 +60,7 @@ struct sidtab_node_inner {
struct sidtab_isid_entry {
int set;
- struct context context;
+ struct sidtab_entry entry;
};
struct sidtab_convert_params {
@@ -66,25 +69,56 @@ struct sidtab_convert_params {
struct sidtab *target;
};
-#define SIDTAB_RCACHE_SIZE 3
+#define SIDTAB_HASH_BITS CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
+#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
struct sidtab {
+ /*
+ * lock-free read access only for as many items as a prior read of
+ * 'count'
+ */
union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
- atomic_t count;
+ /*
+ * access atomically via {READ|WRITE}_ONCE(); only increment under
+ * spinlock
+ */
+ u32 count;
+ /* access only under spinlock */
struct sidtab_convert_params *convert;
spinlock_t lock;
- /* reverse lookup cache */
- atomic_t rcache[SIDTAB_RCACHE_SIZE];
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ /* SID -> context string cache */
+ u32 cache_free_slots;
+ struct list_head cache_lru_list;
+ spinlock_t cache_lock;
+#endif
/* index == SID - 1 (no entry for SECSID_NULL) */
struct sidtab_isid_entry isids[SECINITSID_NUM];
+
+ /* Hash table for fast reverse context-to-sid lookups. */
+ DECLARE_HASHTABLE(context_to_sid, SIDTAB_HASH_BITS);
};
int sidtab_init(struct sidtab *s);
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);
-struct context *sidtab_search(struct sidtab *s, u32 sid);
-struct context *sidtab_search_force(struct sidtab *s, u32 sid);
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid);
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid);
+
+static inline struct context *sidtab_search(struct sidtab *s, u32 sid)
+{
+ struct sidtab_entry *entry = sidtab_search_entry(s, sid);
+
+ return entry ? &entry->context : NULL;
+}
+
+static inline struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+{
+ struct sidtab_entry *entry = sidtab_search_entry_force(s, sid);
+
+ return entry ? &entry->context : NULL;
+}
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
@@ -92,6 +126,27 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
void sidtab_destroy(struct sidtab *s);
+int sidtab_hash_stats(struct sidtab *sidtab, char *page);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+ const char *str, u32 str_len);
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+ char **out, u32 *out_len);
+#else
+static inline void sidtab_sid2str_put(struct sidtab *s,
+ struct sidtab_entry *entry,
+ const char *str, u32 str_len)
+{
+}
+static inline int sidtab_sid2str_get(struct sidtab *s,
+ struct sidtab_entry *entry,
+ char **out, u32 *out_len)
+{
+ return -ENOENT;
+}
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
+
#endif /* _SS_SIDTAB_H_ */
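Illustrative note (not part of the patch): sidtab_sid2str_put()/sidtab_sid2str_get() declared above add an optional SID-to-context-string cache with a fixed slot budget and LRU eviction, compiled out when CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE is 0. Below is a small userspace sketch of the put/get behaviour only; the names (lru_cache, lru_put, lru_get) are invented, there is no RCU and no per-entry back-pointer, and the point is the eviction and copy-out semantics rather than the kernel implementation.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LRU_SLOTS 2	/* stand-in for the configured cache size */

struct lru_node {
	unsigned int sid;
	char *str;
	struct lru_node *prev, *next;	/* LRU list, head = most recently used */
};

struct lru_cache {
	struct lru_node *head, *tail;
	int free_slots;
};

static void lru_unlink(struct lru_cache *c, struct lru_node *n)
{
	if (n->prev) n->prev->next = n->next; else c->head = n->next;
	if (n->next) n->next->prev = n->prev; else c->tail = n->prev;
}

static void lru_link_head(struct lru_cache *c, struct lru_node *n)
{
	n->prev = NULL;
	n->next = c->head;
	if (c->head)
		c->head->prev = n;
	c->head = n;
	if (!c->tail)
		c->tail = n;
}

/* Remember sid -> str, evicting the least recently used entry when full. */
static void lru_put(struct lru_cache *c, unsigned int sid, const char *str)
{
	struct lru_node *n;

	for (n = c->head; n; n = n->next)
		if (n->sid == sid) {		/* already cached: bump to head */
			lru_unlink(c, n);
			lru_link_head(c, n);
			return;
		}

	if (c->free_slots == 0) {		/* full: drop the tail entry */
		n = c->tail;
		lru_unlink(c, n);
		free(n->str);
		free(n);
	} else {
		c->free_slots--;
	}

	n = calloc(1, sizeof(*n));
	n->sid = sid;
	n->str = strdup(str);
	lru_link_head(c, n);
}

/* Return a caller-owned copy of the cached string, or -ENOENT on a miss. */
static int lru_get(struct lru_cache *c, unsigned int sid, char **out)
{
	struct lru_node *n;

	for (n = c->head; n; n = n->next)
		if (n->sid == sid) {
			*out = strdup(n->str);
			return *out ? 0 : -ENOMEM;
		}
	return -ENOENT;
}

int main(void)
{
	struct lru_cache c = { .free_slots = LRU_SLOTS };
	char *s;

	lru_put(&c, 1, "system_u:object_r:etc_t:s0");
	lru_put(&c, 2, "system_u:object_r:bin_t:s0");
	lru_put(&c, 3, "system_u:object_r:tmp_t:s0");	/* evicts sid 1 */

	printf("sid 1: %d\n", lru_get(&c, 1, &s));	/* -ENOENT: evicted */
	if (lru_get(&c, 3, &s) == 0) {
		printf("sid 3: %s\n", s);
		free(s);
	}
	return 0;
}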
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index f1c93a7be9ec..38ac3da4e791 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len)
if (i == 0 || i >= SMK_LONGLABEL)
return ERR_PTR(-EINVAL);
- smack = kzalloc(i + 1, GFP_KERNEL);
+ smack = kzalloc(i + 1, GFP_NOFS);
if (smack == NULL)
return ERR_PTR(-ENOMEM);
@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
if ((m & *cp) == 0)
continue;
rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
- cat, GFP_KERNEL);
+ cat, GFP_NOFS);
if (rc < 0) {
netlbl_catmap_free(sap->attr.mls.cat);
return rc;
@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
if (skp != NULL)
goto freeout;
- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+ skp = kzalloc(sizeof(*skp), GFP_NOFS);
if (skp == NULL) {
skp = ERR_PTR(-ENOMEM);
goto freeout;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 4c5e5a438f8b..ecea41ce919b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -28,7 +28,6 @@
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/mutex.h>
-#include <linux/pipe_fs_i.h>
#include <net/cipso_ipv4.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -288,7 +287,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
if (!(ip->i_opflags & IOP_XATTR))
return ERR_PTR(-EOPNOTSUPP);
- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
@@ -307,7 +306,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
/**
* init_inode_smack - initialize an inode security blob
- * @isp: the blob to initialize
+ * @inode: inode to extract the info from
* @skp: a pointer to the Smack label entry to use in the blob
*
*/
@@ -509,7 +508,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
/**
* smack_syslog - Smack approval on syslog
- * @type: message type
+ * @typefrom_file: unused
*
* Returns 0 on success, error code otherwise.
*/
@@ -765,7 +764,7 @@ static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts)
/**
* smack_set_mnt_opts - set Smack specific mount options
* @sb: the file system superblock
- * @opts: Smack mount options
+ * @mnt_opts: Smack mount options
* @kern_flags: mount option from kernel space or user space
* @set_kern_flags: where to store converted mount opts
*
@@ -937,7 +936,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (rc != 0)
return rc;
- } else if (bprm->unsafe)
+ }
+ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
return -EPERM;
bsp->smk_task = isp->smk_task;
@@ -958,7 +958,7 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
* smack_inode_alloc_security - allocate an inode blob
* @inode: the inode in need of a blob
*
- * Returns 0 if it gets a blob, -ENOMEM otherwise
+ * Returns 0
*/
static int smack_inode_alloc_security(struct inode *inode)
{
@@ -1164,7 +1164,7 @@ static int smack_inode_rename(struct inode *old_inode,
*
* This is the important Smack hook.
*
- * Returns 0 if access is permitted, -EACCES otherwise
+ * Returns 0 if access is permitted, an error code otherwise
*/
static int smack_inode_permission(struct inode *inode, int mask)
{
@@ -1222,8 +1222,7 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
/**
* smack_inode_getattr - Smack check for getting attributes
- * @mnt: vfsmount of the object
- * @dentry: the object
+ * @path: path to extract the info from
*
* Returns 0 if access is permitted, an error code otherwise
*/
@@ -1870,14 +1869,13 @@ static int smack_file_receive(struct file *file)
/**
* smack_file_open - Smack dentry open processing
* @file: the object
- * @cred: task credential
*
* Set the security blob in the file structure.
* Allow the open only if the task has read access. There are
* many read operations (e.g. fstat) that you can do with an
* fd even if you have the file open write-only.
*
- * Returns 0
+ * Returns 0 if current has access, error code otherwise
*/
static int smack_file_open(struct file *file)
{
@@ -1900,7 +1898,7 @@ static int smack_file_open(struct file *file)
/**
* smack_cred_alloc_blank - "allocate" blank task-level security credentials
- * @new: the new credentials
+ * @cred: the new credentials
* @gfp: the atomicity of any memory allocations
*
* Prepare a blank set of credentials for modification. This must allocate all
@@ -1983,7 +1981,7 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
/**
* smack_cred_getsecid - get the secid corresponding to a creds structure
- * @c: the object creds
+ * @cred: the object creds
* @secid: where to put the result
*
* Sets the secid to contain a u32 version of the smack label.
@@ -2140,8 +2138,6 @@ static int smack_task_getioprio(struct task_struct *p)
/**
* smack_task_setscheduler - Smack check on setting scheduler
* @p: the task object
- * @policy: unused
- * @lp: unused
*
* Return 0 if read access is permitted
*/
@@ -2611,8 +2607,9 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
/**
* smk_ipv6_port_check - check Smack port access
- * @sock: socket
+ * @sk: socket
* @address: address
+ * @act: the action being taken
*
* Create or update the port list entry
*/
@@ -2782,7 +2779,7 @@ static int smack_socket_post_create(struct socket *sock, int family,
*
* Cross reference the peer labels for SO_PEERSEC
*
- * Returns 0 on success, and error code otherwise
+ * Returns 0
*/
static int smack_socket_socketpair(struct socket *socka,
struct socket *sockb)
@@ -3014,13 +3011,13 @@ static int smack_shm_shmctl(struct kern_ipc_perm *isp, int cmd)
*
* Returns 0 if current has the requested access, error code otherwise
*/
-static int smack_shm_shmat(struct kern_ipc_perm *ipc, char __user *shmaddr,
+static int smack_shm_shmat(struct kern_ipc_perm *isp, char __user *shmaddr,
int shmflg)
{
int may;
may = smack_flags_to_may(shmflg);
- return smk_curacc_shm(ipc, may);
+ return smk_curacc_shm(isp, may);
}
/**
@@ -3925,6 +3922,8 @@ access_check:
skp = smack_ipv6host_label(&sadd);
if (skp == NULL)
skp = smack_net_ambient;
+ if (skb == NULL)
+ break;
#ifdef CONFIG_AUDIT
smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
ad.a.u.net->family = family;
@@ -4762,7 +4761,7 @@ static __init void init_smack_known_list(void)
/**
* smack_init - initialize the smack system
*
- * Returns 0
+ * Returns 0 on success, -ENOMEM if there's no memory
*/
static __init int smack_init(void)
{
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index dd3d5942e669..1b467381986f 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -951,7 +951,8 @@ static bool tomoyo_manager(void)
exe = tomoyo_get_exe();
if (!exe)
return false;
- list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (!ptr->head.is_deleted &&
(!tomoyo_pathcmp(domainname, ptr->manager) ||
!strcmp(exe, ptr->manager->name))) {
@@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname)
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -EINTR;
/* Is there an active domain? */
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
@@ -2320,9 +2322,9 @@ static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
[TOMOYO_MEMORY_QUERY] = "query message:",
};
-/* Timestamp counter for last updated. */
-static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
/* Counter for number of updates. */
+static atomic_t tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
+/* Timestamp counter for last updated. */
static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
/**
@@ -2334,10 +2336,7 @@ static time64_t tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
*/
void tomoyo_update_stat(const u8 index)
{
- /*
- * I don't use atomic operations because race condition is not fatal.
- */
- tomoyo_stat_updated[index]++;
+ atomic_inc(&tomoyo_stat_updated[index]);
tomoyo_stat_modified[index] = ktime_get_real_seconds();
}
@@ -2358,7 +2357,7 @@ static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) {
tomoyo_io_printf(head, "Policy %-30s %10u",
tomoyo_policy_headers[i],
- tomoyo_stat_updated[i]);
+ atomic_read(&tomoyo_stat_updated[i]));
if (tomoyo_stat_modified[i]) {
struct tomoyo_time stamp;
@@ -2778,7 +2777,8 @@ void tomoyo_check_profile(void)
tomoyo_policy_loaded = true;
pr_info("TOMOYO: 2.6.0\n");
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
const u8 profile = domain->profile;
struct tomoyo_policy_namespace *ns = domain->ns;
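Illustrative note (not part of the patch): the tomoyo/common.c hunk above converts tomoyo_stat_updated[] from a plain unsigned int into an atomic_t so that concurrent callers of tomoyo_update_stat() cannot lose increments. The snippet below is a userspace analogue using C11 atomics (not the kernel atomic_t API); the worker/thread names are invented for the illustration. Build with something like "cc -O2 -pthread".

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define THREADS 4
#define ITERS   100000

static atomic_uint atomic_counter;	/* analogue of the new atomic_t counter */
static unsigned int plain_counter;	/* analogue of the old plain counter */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERS; i++) {
		atomic_fetch_add(&atomic_counter, 1);	/* never loses an update */
		plain_counter++;	/* data race: increments can be lost */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[THREADS];

	for (int i = 0; i < THREADS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < THREADS; i++)
		pthread_join(tid[i], NULL);

	printf("atomic: %u (expected %u)\n",
	       atomic_load(&atomic_counter), THREADS * ITERS);
	printf("plain:  %u (often less)\n", plain_counter);
	return 0;
}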
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 8526a0a74023..7869d6a9980b 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
if (mutex_lock_interruptible(&tomoyo_policy_lock))
return -ENOMEM;
- list_for_each_entry_rcu(entry, list, list) {
+ list_for_each_entry_rcu(entry, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
continue;
if (!check_duplicate(entry, new_entry))
@@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
}
if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
- list_for_each_entry_rcu(entry, list, list) {
+ list_for_each_entry_rcu(entry, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
continue;
if (!tomoyo_same_acl_head(entry, new_entry) ||
@@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
u16 i = 0;
retry:
- list_for_each_entry_rcu(ptr, list, list) {
+ list_for_each_entry_rcu(ptr, list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (ptr->is_deleted || ptr->type != r->param_type)
continue;
if (!check_entry(r, ptr))
@@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition
{
const struct tomoyo_transition_control *ptr;
- list_for_each_entry_rcu(ptr, list, head.list) {
+ list_for_each_entry_rcu(ptr, list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (ptr->head.is_deleted || ptr->type != type)
continue;
if (ptr->domainname) {
@@ -735,7 +739,8 @@ retry:
/* Check 'aggregator' directive. */
candidate = &exename;
- list_for_each_entry_rcu(ptr, list, head.list) {
+ list_for_each_entry_rcu(ptr, list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (ptr->head.is_deleted ||
!tomoyo_path_matches_pattern(&exename,
ptr->original_name))
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
index a37c7dc66e44..1cecdd797597 100644
--- a/security/tomoyo/group.c
+++ b/security/tomoyo/group.c
@@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
{
struct tomoyo_path_group *member;
- list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (!tomoyo_path_matches_pattern(pathname, member->member_name))
@@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min,
struct tomoyo_number_group *member;
bool matched = false;
- list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (min > member->number.values[1] ||
@@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
bool matched = false;
const u8 size = is_ipv6 ? 16 : 4;
- list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ list_for_each_entry_rcu(member, &group->member_list, head.list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (member->head.is_deleted)
continue;
if (member->address.is_ipv6 != is_ipv6)
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index e7832448d721..bf38fc1b59b2 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -218,31 +218,6 @@ out:
}
/**
- * tomoyo_get_socket_name - Get the name of a socket.
- *
- * @path: Pointer to "struct path".
- * @buffer: Pointer to buffer to return value in.
- * @buflen: Sizeof @buffer.
- *
- * Returns the buffer.
- */
-static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
- const int buflen)
-{
- struct inode *inode = d_backing_inode(path->dentry);
- struct socket *sock = inode ? SOCKET_I(inode) : NULL;
- struct sock *sk = sock ? sock->sk : NULL;
-
- if (sk) {
- snprintf(buffer, buflen, "socket:[family=%u:type=%u:protocol=%u]",
- sk->sk_family, sk->sk_type, sk->sk_protocol);
- } else {
- snprintf(buffer, buflen, "socket:[unknown]");
- }
- return buffer;
-}
-
-/**
* tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
*
* @path: Pointer to "struct path".
@@ -279,12 +254,7 @@ char *tomoyo_realpath_from_path(const struct path *path)
break;
/* To make sure that pos is '\0' terminated. */
buf[buf_len - 1] = '\0';
- /* Get better name for socket. */
- if (sb->s_magic == SOCKFS_MAGIC) {
- pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
- goto encode;
- }
- /* For "pipe:[\$]". */
+ /* For "pipe:[\$]" and "socket:[\$]". */
if (dentry->d_op && dentry->d_op->d_dname) {
pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
goto encode;
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 52752e1a84ed..eba0b3395851 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
if (!domain->is_deleted &&
!tomoyo_pathcmp(&name, domain->domainname))
return domain;
@@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
return false;
if (!domain)
return true;
- list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
+ srcu_read_lock_held(&tomoyo_ss)) {
u16 perm;
u8 i;