Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig                    15
-rw-r--r--  security/integrity/iint.c            1
-rw-r--r--  security/integrity/ima/ima_main.c    2
-rw-r--r--  security/yama/yama_lsm.c            11
4 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/security/Kconfig b/security/Kconfig
index b0cb9a5f9448..c4302067a3ad 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -154,6 +154,7 @@ config HARDENED_USERCOPY
 	bool "Harden memory copies between kernel and userspace"
 	depends on HAVE_HARDENED_USERCOPY_ALLOCATOR
 	select BUG
+	imply STRICT_DEVMEM
 	help
 	  This option checks for obviously wrong memory regions when
 	  copying memory to/from the kernel (via copy_to_user() and
@@ -163,6 +164,20 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_FALLBACK
+	bool "Allow usercopy whitelist violations to fallback to object size"
+	depends on HARDENED_USERCOPY
+	default y
+	help
+	  This is a temporary option that allows missing usercopy whitelists
+	  to be discovered via a WARN() to the kernel log, instead of
+	  rejecting the copy, falling back to non-whitelisted hardened
+	  usercopy that checks the slab allocation size instead of the
+	  whitelist size. This option will be removed once it seems like
+	  all missing usercopy whitelists have been identified and fixed.
+	  Booting with "slab_common.usercopy_fallback=Y/N" can change
+	  this setting.
+
 config HARDENED_USERCOPY_PAGESPAN
 	bool "Refuse to copy allocations that span multiple pages"
 	depends on HARDENED_USERCOPY
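
For context on how CONFIG_HARDENED_USERCOPY_FALLBACK is consumed: the whitelist check lives in the slab allocators, and the fallback only downgrades a whitelist miss to a warning when the copy still fits inside the allocated object. Below is a minimal sketch of that decision; check_usercopy_region() and its parameters are invented here for illustration, while usercopy_fallback mirrors the Kconfig/boot-parameter setting named in the help text:

    /*
     * Sketch only: decide whether a copy of n bytes starting at
     * "offset" within a slab object may proceed.  "useroffset" and
     * "usersize" describe the cache's whitelisted region.
     */
    static int check_usercopy_region(unsigned long offset, unsigned long n,
                                     unsigned long useroffset,
                                     unsigned long usersize,
                                     unsigned long object_size,
                                     bool usercopy_fallback)
    {
            /* Copy falls entirely inside the whitelisted region: allowed. */
            if (offset >= useroffset && offset - useroffset + n <= usersize)
                    return 0;

            /*
             * Whitelist miss, but still within the allocation: with the
             * fallback enabled, warn so the missing whitelist can be
             * found in the logs, and let the copy through.
             */
            if (usercopy_fallback && offset + n <= object_size) {
                    WARN_ONCE(1, "usercopy: whitelist miss (len %lu)\n", n);
                    return 0;
            }

            /* Hardened usercopy rejects everything else. */
            return -EACCES;
    }

With CONFIG_HARDENED_USERCOPY_FALLBACK=n (or slab_common.usercopy_fallback=N on the kernel command line), the warning branch is gone and a whitelist miss rejects the copy outright.
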
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index fc38ca08dbb5..9700e96ab0f0 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -74,6 +74,7 @@ static void iint_free(struct integrity_iint_cache *iint)
 	iint->ima_hash = NULL;
 	iint->version = 0;
 	iint->flags = 0UL;
+	iint->atomic_flags = 0UL;
 	iint->ima_file_status = INTEGRITY_UNKNOWN;
 	iint->ima_mmap_status = INTEGRITY_UNKNOWN;
 	iint->ima_bprm_status = INTEGRITY_UNKNOWN;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 061425dd6400..2cfb0c714967 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -135,7 +135,7 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
 		update = test_and_clear_bit(IMA_UPDATE_XATTR,
 					    &iint->atomic_flags);
 		if (!IS_I_VERSION(inode) ||
-		    inode_cmp_iversion(inode, iint->version) ||
+		    !inode_eq_iversion(inode, iint->version) ||
 		    (iint->flags & IMA_NEW_FILE)) {
 			iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
 			iint->measured_pcrs = 0;
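
This hunk tracks the iversion API rework: inode_cmp_iversion() returned nonzero when the inode's i_version differed from the sampled value, while the replacement inode_eq_iversion() returns true when they match, so the predicate is simply negated. Assuming the linux/iversion.h helpers of this series, the two spellings are equivalent (must_remeasure is a stand-in name; in ima_check_last_writer() the condition guards the flag-clearing block shown above):

    /* Old spelling: re-measure when the version comparison is nonzero. */
    if (inode_cmp_iversion(inode, iint->version))
            must_remeasure = true;

    /* New spelling: re-measure when the versions are not equal. */
    if (!inode_eq_iversion(inode, iint->version))
            must_remeasure = true;
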
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 8298e094f4f7..ffda91a4a1aa 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -250,15 +250,10 @@ int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 	} else {
 		struct task_struct *tracer;
 
-		rcu_read_lock();
-		tracer = find_task_by_vpid(arg2);
-		if (tracer)
-			get_task_struct(tracer);
-		else
+		tracer = find_get_task_by_vpid(arg2);
+		if (!tracer) {
 			rc = -EINVAL;
-		rcu_read_unlock();
-
-		if (tracer) {
+		} else {
 			rc = yama_ptracer_add(tracer, myself);
 			put_task_struct(tracer);
 		}
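
The yama hunk converts an open-coded lookup-and-pin sequence to the then-new find_get_task_by_vpid() helper. Roughly, per kernel/pid.c of this era, the helper does exactly what the deleted lines did:

    struct task_struct *find_get_task_by_vpid(pid_t nr)
    {
            struct task_struct *task;

            rcu_read_lock();
            task = find_task_by_vpid(nr);
            if (task)
                    get_task_struct(task);  /* pin before dropping RCU */
            rcu_read_unlock();

            return task;
    }

Folding the lookup into the helper also lets the caller collapse the two separate tracer checks into a single if/else, with put_task_struct() still balancing the reference taken by the lookup.
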