Diffstat (limited to 'security/selinux/avc.c')
| -rw-r--r-- | security/selinux/avc.c | 877 |
1 file changed, 644 insertions(+), 233 deletions(-)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c index dad36a6ab45f..8f77b9a732e1 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -1,17 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Implementation of the kernel access vector cache (AVC). * - * Authors: Stephen Smalley, <sds@epoch.ncsc.mil> + * Authors: Stephen Smalley, <stephen.smalley.work@gmail.com> * James Morris <jmorris@redhat.com> * * Update: KaiGai, Kohei <kaigai@ak.jp.nec.com> * Replaced the avc_lock spinlock by RCU. * * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2, - * as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/stddef.h> @@ -22,6 +19,7 @@ #include <linux/init.h> #include <linux/skbuff.h> #include <linux/percpu.h> +#include <linux/list.h> #include <net/sock.h> #include <linux/un.h> #include <net/af_unix.h> @@ -32,10 +30,14 @@ #include "avc.h" #include "avc_ss.h" #include "classmap.h" +#include "hash.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/avc.h> -#define AVC_CACHE_SLOTS 512 -#define AVC_DEF_CACHE_THRESHOLD 512 -#define AVC_CACHE_RECLAIM 16 +#define AVC_CACHE_SLOTS (1 << CONFIG_SECURITY_SELINUX_AVC_HASH_BITS) +#define AVC_DEF_CACHE_THRESHOLD AVC_CACHE_SLOTS +#define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS #define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) @@ -48,6 +50,7 @@ struct avc_entry { u32 tsid; u16 tclass; struct av_decision avd; + struct avc_xperms_node *xp_node; }; struct avc_node { @@ -56,6 +59,16 @@ struct avc_node { struct rcu_head rhead; }; +struct avc_xperms_decision_node { + struct extended_perms_decision xpd; + struct list_head xpd_list; /* list of extended_perms_decision */ +}; + +struct avc_xperms_node { + struct extended_perms xp; + struct list_head xpd_head; /* list head of extended_perms_decision */ +}; + struct avc_cache { struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ @@ -70,87 +83,49 @@ struct avc_callback_node { struct avc_callback_node *next; }; -/* Exported via selinufs */ -unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD; - #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 }; #endif -static struct avc_cache avc_cache; -static struct avc_callback_node *avc_callbacks; -static struct kmem_cache *avc_node_cachep; +struct selinux_avc { + unsigned int avc_cache_threshold; + struct avc_cache avc_cache; +}; -static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) -{ - return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); -} +static struct selinux_avc selinux_avc; -/** - * avc_dump_av - Display an access vector in human-readable form. 
- * @tclass: target security class - * @av: access vector - */ -static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) +void selinux_avc_init(void) { - const char **perms; - int i, perm; - - if (av == 0) { - audit_log_format(ab, " null"); - return; - } - - perms = secclass_map[tclass-1].perms; + int i; - audit_log_format(ab, " {"); - i = 0; - perm = 1; - while (i < (sizeof(av) * 8)) { - if ((perm & av) && perms[i]) { - audit_log_format(ab, " %s", perms[i]); - av &= ~perm; - } - i++; - perm <<= 1; + selinux_avc.avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD; + for (i = 0; i < AVC_CACHE_SLOTS; i++) { + INIT_HLIST_HEAD(&selinux_avc.avc_cache.slots[i]); + spin_lock_init(&selinux_avc.avc_cache.slots_lock[i]); } + atomic_set(&selinux_avc.avc_cache.active_nodes, 0); + atomic_set(&selinux_avc.avc_cache.lru_hint, 0); +} - if (av) - audit_log_format(ab, " 0x%x", av); - - audit_log_format(ab, " }"); +unsigned int avc_get_cache_threshold(void) +{ + return selinux_avc.avc_cache_threshold; } -/** - * avc_dump_query - Display a SID pair and a class in human-readable form. - * @ssid: source security identifier - * @tsid: target security identifier - * @tclass: target security class - */ -static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass) +void avc_set_cache_threshold(unsigned int cache_threshold) { - int rc; - char *scontext; - u32 scontext_len; + selinux_avc.avc_cache_threshold = cache_threshold; +} - rc = security_sid_to_context(ssid, &scontext, &scontext_len); - if (rc) - audit_log_format(ab, "ssid=%d", ssid); - else { - audit_log_format(ab, "scontext=%s", scontext); - kfree(scontext); - } +static struct avc_callback_node *avc_callbacks __ro_after_init; +static struct kmem_cache *avc_node_cachep __ro_after_init; +static struct kmem_cache *avc_xperms_data_cachep __ro_after_init; +static struct kmem_cache *avc_xperms_decision_cachep __ro_after_init; +static struct kmem_cache *avc_xperms_cachep __ro_after_init; - rc = security_sid_to_context(tsid, &scontext, &scontext_len); - if (rc) - audit_log_format(ab, " tsid=%d", tsid); - else { - audit_log_format(ab, " tcontext=%s", scontext); - kfree(scontext); - } - - BUG_ON(tclass >= ARRAY_SIZE(secclass_map)); - audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); +static inline u32 avc_hash(u32 ssid, u32 tsid, u16 tclass) +{ + return av_hash(ssid, tsid, (u32)tclass, (u32)(AVC_CACHE_SLOTS - 1)); } /** @@ -160,19 +135,10 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla */ void __init avc_init(void) { - int i; - - for (i = 0; i < AVC_CACHE_SLOTS; i++) { - INIT_HLIST_HEAD(&avc_cache.slots[i]); - spin_lock_init(&avc_cache.slots_lock[i]); - } - atomic_set(&avc_cache.active_nodes, 0); - atomic_set(&avc_cache.lru_hint, 0); - - avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), - 0, SLAB_PANIC, NULL); - - audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); + avc_node_cachep = KMEM_CACHE(avc_node, SLAB_PANIC); + avc_xperms_cachep = KMEM_CACHE(avc_xperms_node, SLAB_PANIC); + avc_xperms_decision_cachep = KMEM_CACHE(avc_xperms_decision_node, SLAB_PANIC); + avc_xperms_data_cachep = KMEM_CACHE(extended_perms_data, SLAB_PANIC); } int avc_get_hash_stats(char *page) @@ -186,7 +152,7 @@ int avc_get_hash_stats(char *page) slots_used = 0; max_chain_len = 0; for (i = 0; i < AVC_CACHE_SLOTS; i++) { - head = &avc_cache.slots[i]; + head = &selinux_avc.avc_cache.slots[i]; if (!hlist_empty(head)) { slots_used++; chain_len = 0; @@ -201,13 +167,269 
@@ int avc_get_hash_stats(char *page) return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n" "longest chain: %d\n", - atomic_read(&avc_cache.active_nodes), + atomic_read(&selinux_avc.avc_cache.active_nodes), slots_used, AVC_CACHE_SLOTS, max_chain_len); } +/* + * using a linked list for extended_perms_decision lookup because the list is + * always small. i.e. less than 5, typically 1 + */ +static struct extended_perms_decision * +avc_xperms_decision_lookup(u8 driver, u8 base_perm, + struct avc_xperms_node *xp_node) +{ + struct avc_xperms_decision_node *xpd_node; + + list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) { + if (xpd_node->xpd.driver == driver && + xpd_node->xpd.base_perm == base_perm) + return &xpd_node->xpd; + } + return NULL; +} + +static inline unsigned int +avc_xperms_has_perm(struct extended_perms_decision *xpd, + u8 perm, u8 which) +{ + unsigned int rc = 0; + + if ((which == XPERMS_ALLOWED) && + (xpd->used & XPERMS_ALLOWED)) + rc = security_xperm_test(xpd->allowed->p, perm); + else if ((which == XPERMS_AUDITALLOW) && + (xpd->used & XPERMS_AUDITALLOW)) + rc = security_xperm_test(xpd->auditallow->p, perm); + else if ((which == XPERMS_DONTAUDIT) && + (xpd->used & XPERMS_DONTAUDIT)) + rc = security_xperm_test(xpd->dontaudit->p, perm); + return rc; +} + +static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node, + u8 driver, u8 base_perm, u8 perm) +{ + struct extended_perms_decision *xpd; + security_xperm_set(xp_node->xp.drivers.p, driver); + xp_node->xp.base_perms |= base_perm; + xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node); + if (xpd && xpd->allowed) + security_xperm_set(xpd->allowed->p, perm); +} + +static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node) +{ + struct extended_perms_decision *xpd; + + xpd = &xpd_node->xpd; + if (xpd->allowed) + kmem_cache_free(avc_xperms_data_cachep, xpd->allowed); + if (xpd->auditallow) + kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow); + if (xpd->dontaudit) + kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit); + kmem_cache_free(avc_xperms_decision_cachep, xpd_node); +} + +static void avc_xperms_free(struct avc_xperms_node *xp_node) +{ + struct avc_xperms_decision_node *xpd_node, *tmp; + + if (!xp_node) + return; + + list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) { + list_del(&xpd_node->xpd_list); + avc_xperms_decision_free(xpd_node); + } + kmem_cache_free(avc_xperms_cachep, xp_node); +} + +static void avc_copy_xperms_decision(struct extended_perms_decision *dest, + struct extended_perms_decision *src) +{ + dest->base_perm = src->base_perm; + dest->driver = src->driver; + dest->used = src->used; + if (dest->used & XPERMS_ALLOWED) + memcpy(dest->allowed->p, src->allowed->p, + sizeof(src->allowed->p)); + if (dest->used & XPERMS_AUDITALLOW) + memcpy(dest->auditallow->p, src->auditallow->p, + sizeof(src->auditallow->p)); + if (dest->used & XPERMS_DONTAUDIT) + memcpy(dest->dontaudit->p, src->dontaudit->p, + sizeof(src->dontaudit->p)); +} + +/* + * similar to avc_copy_xperms_decision, but only copy decision + * information relevant to this perm + */ +static inline void avc_quick_copy_xperms_decision(u8 perm, + struct extended_perms_decision *dest, + struct extended_perms_decision *src) +{ + /* + * compute index of the u32 of the 256 bits (8 u32s) that contain this + * command permission + */ + u8 i = perm >> 5; + + dest->base_perm = src->base_perm; + dest->used = src->used; + if (dest->used & XPERMS_ALLOWED) + dest->allowed->p[i] = 
src->allowed->p[i]; + if (dest->used & XPERMS_AUDITALLOW) + dest->auditallow->p[i] = src->auditallow->p[i]; + if (dest->used & XPERMS_DONTAUDIT) + dest->dontaudit->p[i] = src->dontaudit->p[i]; +} + +static struct avc_xperms_decision_node + *avc_xperms_decision_alloc(u8 which) +{ + struct avc_xperms_decision_node *xpd_node; + struct extended_perms_decision *xpd; + + xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT); + if (!xpd_node) + return NULL; + + xpd = &xpd_node->xpd; + if (which & XPERMS_ALLOWED) { + xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep, + GFP_NOWAIT); + if (!xpd->allowed) + goto error; + } + if (which & XPERMS_AUDITALLOW) { + xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep, + GFP_NOWAIT); + if (!xpd->auditallow) + goto error; + } + if (which & XPERMS_DONTAUDIT) { + xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep, + GFP_NOWAIT); + if (!xpd->dontaudit) + goto error; + } + return xpd_node; +error: + avc_xperms_decision_free(xpd_node); + return NULL; +} + +static int avc_add_xperms_decision(struct avc_node *node, + struct extended_perms_decision *src) +{ + struct avc_xperms_decision_node *dest_xpd; + + dest_xpd = avc_xperms_decision_alloc(src->used); + if (!dest_xpd) + return -ENOMEM; + avc_copy_xperms_decision(&dest_xpd->xpd, src); + list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head); + node->ae.xp_node->xp.len++; + return 0; +} + +static struct avc_xperms_node *avc_xperms_alloc(void) +{ + struct avc_xperms_node *xp_node; + + xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT); + if (!xp_node) + return xp_node; + INIT_LIST_HEAD(&xp_node->xpd_head); + return xp_node; +} + +static int avc_xperms_populate(struct avc_node *node, + struct avc_xperms_node *src) +{ + struct avc_xperms_node *dest; + struct avc_xperms_decision_node *dest_xpd; + struct avc_xperms_decision_node *src_xpd; + + if (src->xp.len == 0) + return 0; + dest = avc_xperms_alloc(); + if (!dest) + return -ENOMEM; + + memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p)); + dest->xp.len = src->xp.len; + dest->xp.base_perms = src->xp.base_perms; + + /* for each source xpd allocate a destination xpd and copy */ + list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) { + dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used); + if (!dest_xpd) + goto error; + avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd); + list_add(&dest_xpd->xpd_list, &dest->xpd_head); + } + node->ae.xp_node = dest; + return 0; +error: + avc_xperms_free(dest); + return -ENOMEM; + +} + +static inline u32 avc_xperms_audit_required(u32 requested, + struct av_decision *avd, + struct extended_perms_decision *xpd, + u8 perm, + int result, + u32 *deniedp) +{ + u32 denied, audited; + + denied = requested & ~avd->allowed; + if (unlikely(denied)) { + audited = denied & avd->auditdeny; + if (audited && xpd) { + if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT)) + audited = 0; + } + } else if (result) { + audited = denied = requested; + } else { + audited = requested & avd->auditallow; + if (audited && xpd) { + if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW)) + audited = 0; + } + } + + *deniedp = denied; + return audited; +} + +static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass, + u32 requested, struct av_decision *avd, + struct extended_perms_decision *xpd, + u8 perm, int result, + struct common_audit_data *ad) +{ + u32 audited, denied; + + audited = avc_xperms_audit_required( + requested, avd, xpd, perm, result, &denied); + if 
(likely(!audited)) + return 0; + return slow_avc_audit(ssid, tsid, tclass, requested, + audited, denied, result, ad); +} + static void avc_node_free(struct rcu_head *rhead) { struct avc_node *node = container_of(rhead, struct avc_node, rhead); + avc_xperms_free(node->ae.xp_node); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); } @@ -216,21 +438,22 @@ static void avc_node_delete(struct avc_node *node) { hlist_del_rcu(&node->list); call_rcu(&node->rhead, avc_node_free); - atomic_dec(&avc_cache.active_nodes); + atomic_dec(&selinux_avc.avc_cache.active_nodes); } static void avc_node_kill(struct avc_node *node) { + avc_xperms_free(node->ae.xp_node); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); - atomic_dec(&avc_cache.active_nodes); + atomic_dec(&selinux_avc.avc_cache.active_nodes); } static void avc_node_replace(struct avc_node *new, struct avc_node *old) { hlist_replace_rcu(&old->list, &new->list); call_rcu(&old->rhead, avc_node_free); - atomic_dec(&avc_cache.active_nodes); + atomic_dec(&selinux_avc.avc_cache.active_nodes); } static inline int avc_reclaim_node(void) @@ -242,9 +465,10 @@ static inline int avc_reclaim_node(void) spinlock_t *lock; for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { - hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); - head = &avc_cache.slots[hvalue]; - lock = &avc_cache.slots_lock[hvalue]; + hvalue = atomic_inc_return(&selinux_avc.avc_cache.lru_hint) & + (AVC_CACHE_SLOTS - 1); + head = &selinux_avc.avc_cache.slots[hvalue]; + lock = &selinux_avc.avc_cache.slots_lock[hvalue]; if (!spin_trylock_irqsave(lock, flags)) continue; @@ -271,14 +495,15 @@ static struct avc_node *avc_alloc_node(void) { struct avc_node *node; - node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); + node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT); if (!node) goto out; INIT_HLIST_NODE(&node->list); avc_cache_stats_incr(allocations); - if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold) + if (atomic_inc_return(&selinux_avc.avc_cache.active_nodes) > + selinux_avc.avc_cache_threshold) avc_reclaim_node(); out: @@ -296,11 +521,11 @@ static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tcl static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node, *ret = NULL; - int hvalue; + u32 hvalue; struct hlist_head *head; hvalue = avc_hash(ssid, tsid, tclass); - head = &avc_cache.slots[hvalue]; + head = &selinux_avc.avc_cache.slots[hvalue]; hlist_for_each_entry_rcu(node, head, list) { if (ssid == node->ae.ssid && tclass == node->ae.tclass && @@ -339,7 +564,7 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) return NULL; } -static int avc_latest_notif_update(int seqno, int is_insert) +static int avc_latest_notif_update(u32 seqno, int is_insert) { int ret = 0; static DEFINE_SPINLOCK(notif_lock); @@ -347,14 +572,14 @@ static int avc_latest_notif_update(int seqno, int is_insert) spin_lock_irqsave(¬if_lock, flag); if (is_insert) { - if (seqno < avc_cache.latest_notif) { - printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n", - seqno, avc_cache.latest_notif); + if (seqno < selinux_avc.avc_cache.latest_notif) { + pr_warn("SELinux: avc: seqno %d < latest_notif %d\n", + seqno, selinux_avc.avc_cache.latest_notif); ret = -EAGAIN; } } else { - if (seqno > avc_cache.latest_notif) - avc_cache.latest_notif = seqno; + if (seqno > selinux_avc.avc_cache.latest_notif) + selinux_avc.avc_cache.latest_notif = seqno; } 
spin_unlock_irqrestore(¬if_lock, flag); @@ -367,6 +592,7 @@ static int avc_latest_notif_update(int seqno, int is_insert) * @tsid: target security identifier * @tclass: target security class * @avd: resulting av decision + * @xp_node: resulting extended permissions * * Insert an AVC entry for the SID pair * (@ssid, @tsid) and class @tclass. @@ -375,44 +601,45 @@ static int avc_latest_notif_update(int seqno, int is_insert) * response to a security_compute_av() call. If the * sequence number @avd->seqno is not less than the latest * revocation notification, then the function copies - * the access vectors into a cache entry, returns - * avc_node inserted. Otherwise, this function returns NULL. + * the access vectors into a cache entry. */ -static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) +static void avc_insert(u32 ssid, u32 tsid, u16 tclass, + struct av_decision *avd, struct avc_xperms_node *xp_node) { struct avc_node *pos, *node = NULL; - int hvalue; + u32 hvalue; unsigned long flag; + spinlock_t *lock; + struct hlist_head *head; if (avc_latest_notif_update(avd->seqno, 1)) - goto out; + return; node = avc_alloc_node(); - if (node) { - struct hlist_head *head; - spinlock_t *lock; - - hvalue = avc_hash(ssid, tsid, tclass); - avc_node_populate(node, ssid, tsid, tclass, avd); + if (!node) + return; - head = &avc_cache.slots[hvalue]; - lock = &avc_cache.slots_lock[hvalue]; + avc_node_populate(node, ssid, tsid, tclass, avd); + if (avc_xperms_populate(node, xp_node)) { + avc_node_kill(node); + return; + } - spin_lock_irqsave(lock, flag); - hlist_for_each_entry(pos, head, list) { - if (pos->ae.ssid == ssid && - pos->ae.tsid == tsid && - pos->ae.tclass == tclass) { - avc_node_replace(node, pos); - goto found; - } + hvalue = avc_hash(ssid, tsid, tclass); + head = &selinux_avc.avc_cache.slots[hvalue]; + lock = &selinux_avc.avc_cache.slots_lock[hvalue]; + spin_lock_irqsave(lock, flag); + hlist_for_each_entry(pos, head, list) { + if (pos->ae.ssid == ssid && + pos->ae.tsid == tsid && + pos->ae.tclass == tclass) { + avc_node_replace(node, pos); + goto found; } - hlist_add_head_rcu(&node->list, head); -found: - spin_unlock_irqrestore(lock, flag); } -out: - return node; + hlist_add_head_rcu(&node->list, head); +found: + spin_unlock_irqrestore(lock, flag); } /** @@ -424,11 +651,36 @@ out: static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; - audit_log_format(ab, "avc: %s ", - ad->selinux_audit_data->denied ? "denied" : "granted"); - avc_dump_av(ab, ad->selinux_audit_data->tclass, - ad->selinux_audit_data->audited); - audit_log_format(ab, " for "); + struct selinux_audit_data *sad = ad->selinux_audit_data; + u32 av = sad->audited, perm; + const char *const *perms; + u32 i; + + audit_log_format(ab, "avc: %s ", sad->denied ? 
"denied" : "granted"); + + if (av == 0) { + audit_log_format(ab, " null"); + return; + } + + perms = secclass_map[sad->tclass-1].perms; + + audit_log_format(ab, " {"); + i = 0; + perm = 1; + while (i < (sizeof(av) * 8)) { + if ((perm & av) && perms[i]) { + audit_log_format(ab, " %s", perms[i]); + av &= ~perm; + } + i++; + perm <<= 1; + } + + if (av) + audit_log_format(ab, " 0x%x", av); + + audit_log_format(ab, " } for "); } /** @@ -440,43 +692,87 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) static void avc_audit_post_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; - audit_log_format(ab, " "); - avc_dump_query(ab, ad->selinux_audit_data->ssid, - ad->selinux_audit_data->tsid, - ad->selinux_audit_data->tclass); + struct selinux_audit_data *sad = ad->selinux_audit_data; + char *scontext = NULL; + char *tcontext = NULL; + const char *tclass = NULL; + u32 scontext_len; + u32 tcontext_len; + int rc; + + rc = security_sid_to_context(sad->ssid, &scontext, + &scontext_len); + if (rc) + audit_log_format(ab, " ssid=%d", sad->ssid); + else + audit_log_format(ab, " scontext=%s", scontext); + + rc = security_sid_to_context(sad->tsid, &tcontext, + &tcontext_len); + if (rc) + audit_log_format(ab, " tsid=%d", sad->tsid); + else + audit_log_format(ab, " tcontext=%s", tcontext); + + tclass = secclass_map[sad->tclass-1].name; + audit_log_format(ab, " tclass=%s", tclass); + + if (sad->denied) + audit_log_format(ab, " permissive=%u", sad->result ? 0 : 1); + + trace_selinux_audited(sad, scontext, tcontext, tclass); + kfree(tcontext); + kfree(scontext); + + /* in case of invalid context report also the actual context string */ + rc = security_sid_to_context_inval(sad->ssid, &scontext, + &scontext_len); + if (!rc && scontext) { + if (scontext_len && scontext[scontext_len - 1] == '\0') + scontext_len--; + audit_log_format(ab, " srawcon="); + audit_log_n_untrustedstring(ab, scontext, scontext_len); + kfree(scontext); + } + + rc = security_sid_to_context_inval(sad->tsid, &scontext, + &scontext_len); + if (!rc && scontext) { + if (scontext_len && scontext[scontext_len - 1] == '\0') + scontext_len--; + audit_log_format(ab, " trawcon="); + audit_log_n_untrustedstring(ab, scontext, scontext_len); + kfree(scontext); + } } -/* This is the slow part of avc audit with big stack footprint */ +/* + * This is the slow part of avc audit with big stack footprint. + * Note that it is non-blocking and can be called from under + * rcu_read_lock(). + */ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, - u32 requested, u32 audited, u32 denied, - struct common_audit_data *a, - unsigned flags) + u32 requested, u32 audited, u32 denied, int result, + struct common_audit_data *a) { struct common_audit_data stack_data; struct selinux_audit_data sad; + if (WARN_ON(!tclass || tclass >= ARRAY_SIZE(secclass_map))) + return -EINVAL; + if (!a) { a = &stack_data; a->type = LSM_AUDIT_DATA_NONE; } - /* - * When in a RCU walk do the audit on the RCU retry. This is because - * the collection of the dname in an inode audit message is not RCU - * safe. Note this may drop some audits when the situation changes - * during retry. However this is logically just as if the operation - * happened a little later. 
- */ - if ((a->type == LSM_AUDIT_DATA_INODE) && - (flags & MAY_NOT_BLOCK)) - return -ECHILD; - sad.tclass = tclass; sad.requested = requested; sad.ssid = ssid; sad.tsid = tsid; sad.audited = audited; sad.denied = denied; + sad.result = result; a->selinux_audit_data = &sad; @@ -512,27 +808,31 @@ out: return rc; } -static inline int avc_sidcmp(u32 x, u32 y) -{ - return (x == y || x == SECSID_WILD || y == SECSID_WILD); -} - /** - * avc_update_node Update an AVC entry + * avc_update_node - Update an AVC entry * @event : Updating event * @perms : Permission mask bits - * @ssid,@tsid,@tclass : identifier of an AVC entry + * @driver: xperm driver information + * @base_perm: the base permission associated with the extended permission + * @xperm: xperm permissions + * @ssid: AVC entry source sid + * @tsid: AVC entry target sid + * @tclass : AVC entry target object class * @seqno : sequence number when decision was made + * @xpd: extended_perms_decision to be added to the node + * @flags: the AVC_* flags, e.g. AVC_EXTENDED_PERMS, or 0. * * if a valid AVC entry doesn't exist,this function returns -ENOENT. * if kmalloc() called internal returns NULL, this function returns -ENOMEM. * otherwise, this function updates the AVC entry. The original AVC-entry object * will release later by RCU. */ -static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, - u32 seqno) +static int avc_update_node(u32 event, u32 perms, u8 driver, u8 base_perm, + u8 xperm, u32 ssid, u32 tsid, u16 tclass, u32 seqno, + struct extended_perms_decision *xpd, u32 flags) { - int hvalue, rc = 0; + u32 hvalue; + int rc = 0; unsigned long flag; struct avc_node *pos, *node, *orig = NULL; struct hlist_head *head; @@ -547,8 +847,8 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, /* Lock the target slot */ hvalue = avc_hash(ssid, tsid, tclass); - head = &avc_cache.slots[hvalue]; - lock = &avc_cache.slots_lock[hvalue]; + head = &selinux_avc.avc_cache.slots[hvalue]; + lock = &selinux_avc.avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); @@ -574,9 +874,19 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd); + if (orig->ae.xp_node) { + rc = avc_xperms_populate(node, orig->ae.xp_node); + if (rc) { + avc_node_kill(node); + goto out_unlock; + } + } + switch (event) { case AVC_CALLBACK_GRANT: node->ae.avd.allowed |= perms; + if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS)) + avc_xperms_allow_perm(node->ae.xp_node, driver, base_perm, xperm); break; case AVC_CALLBACK_TRY_REVOKE: case AVC_CALLBACK_REVOKE: @@ -594,6 +904,13 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, case AVC_CALLBACK_AUDITDENY_DISABLE: node->ae.avd.auditdeny &= ~perms; break; + case AVC_CALLBACK_ADD_XPERMS: + rc = avc_add_xperms_decision(node, xpd); + if (rc) { + avc_node_kill(node); + goto out_unlock; + } + break; } avc_node_replace(node, orig); out_unlock: @@ -614,12 +931,12 @@ static void avc_flush(void) int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { - head = &avc_cache.slots[i]; - lock = &avc_cache.slots_lock[i]; + head = &selinux_avc.avc_cache.slots[i]; + lock = &selinux_avc.avc_cache.slots_lock[i]; spin_lock_irqsave(lock, flag); /* - * With preemptable RCU, the outer spinlock does not + * With preemptible RCU, the outer spinlock does not * prevent RCU grace periods from ending. 
*/ rcu_read_lock(); @@ -655,40 +972,155 @@ int avc_ss_reset(u32 seqno) return rc; } -/* - * Slow-path helper function for avc_has_perm_noaudit, - * when the avc_node lookup fails. We get called with - * the RCU read lock held, and need to return with it - * still held, but drop if for the security compute. +/** + * avc_compute_av - Add an entry to the AVC based on the security policy + * @ssid: subject + * @tsid: object/target + * @tclass: object class + * @avd: access vector decision + * @xp_node: AVC extended permissions node * - * Don't inline this, since it's the slow-path and just - * results in a bigger stack frame. + * Slow-path helper function for avc_has_perm_noaudit, when the avc_node lookup + * fails. Don't inline this, since it's the slow-path and just results in a + * bigger stack frame. */ -static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid, - u16 tclass, struct av_decision *avd) +static noinline void avc_compute_av(u32 ssid, u32 tsid, u16 tclass, + struct av_decision *avd, + struct avc_xperms_node *xp_node) { - rcu_read_unlock(); - security_compute_av(ssid, tsid, tclass, avd); - rcu_read_lock(); - return avc_insert(ssid, tsid, tclass, avd); + INIT_LIST_HEAD(&xp_node->xpd_head); + security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp); + avc_insert(ssid, tsid, tclass, avd, xp_node); } -static noinline int avc_denied(u32 ssid, u32 tsid, - u16 tclass, u32 requested, - unsigned flags, - struct av_decision *avd) +static noinline int avc_denied(u32 ssid, u32 tsid, u16 tclass, u32 requested, + u8 driver, u8 base_perm, u8 xperm, + unsigned int flags, struct av_decision *avd) { if (flags & AVC_STRICT) return -EACCES; - if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE)) + if (enforcing_enabled() && + !(avd->flags & AVD_FLAGS_PERMISSIVE)) return -EACCES; - avc_update_node(AVC_CALLBACK_GRANT, requested, ssid, - tsid, tclass, avd->seqno); + avc_update_node(AVC_CALLBACK_GRANT, requested, driver, base_perm, + xperm, ssid, tsid, tclass, avd->seqno, NULL, flags); return 0; } +/* + * The avc extended permissions logic adds an additional 256 bits of + * permissions to an avc node when extended permissions for that node are + * specified in the avtab. If the additional 256 permissions is not adequate, + * as-is the case with ioctls, then multiple may be chained together and the + * driver field is used to specify which set contains the permission. 
+ */ +int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, + u8 driver, u8 base_perm, u8 xperm, + struct common_audit_data *ad) +{ + struct avc_node *node; + struct av_decision avd; + u32 denied; + struct extended_perms_decision local_xpd; + struct extended_perms_decision *xpd = NULL; + struct extended_perms_data allowed; + struct extended_perms_data auditallow; + struct extended_perms_data dontaudit; + struct avc_xperms_node local_xp_node; + struct avc_xperms_node *xp_node; + int rc = 0, rc2; + + xp_node = &local_xp_node; + if (WARN_ON(!requested)) + return -EACCES; + + rcu_read_lock(); + + node = avc_lookup(ssid, tsid, tclass); + if (unlikely(!node)) { + avc_compute_av(ssid, tsid, tclass, &avd, xp_node); + } else { + memcpy(&avd, &node->ae.avd, sizeof(avd)); + xp_node = node->ae.xp_node; + } + /* if extended permissions are not defined, only consider av_decision */ + if (!xp_node || !xp_node->xp.len) + goto decision; + + local_xpd.allowed = &allowed; + local_xpd.auditallow = &auditallow; + local_xpd.dontaudit = &dontaudit; + + xpd = avc_xperms_decision_lookup(driver, base_perm, xp_node); + if (unlikely(!xpd)) { + /* + * Compute the extended_perms_decision only if the driver + * is flagged and the base permission is known. + */ + if (!security_xperm_test(xp_node->xp.drivers.p, driver) || + !(xp_node->xp.base_perms & base_perm)) { + avd.allowed &= ~requested; + goto decision; + } + rcu_read_unlock(); + security_compute_xperms_decision(ssid, tsid, tclass, driver, + base_perm, &local_xpd); + rcu_read_lock(); + avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, + base_perm, xperm, ssid, tsid, tclass, avd.seqno, + &local_xpd, 0); + } else { + avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd); + } + xpd = &local_xpd; + + if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED)) + avd.allowed &= ~requested; + +decision: + denied = requested & ~(avd.allowed); + if (unlikely(denied)) + rc = avc_denied(ssid, tsid, tclass, requested, driver, + base_perm, xperm, AVC_EXTENDED_PERMS, &avd); + + rcu_read_unlock(); + + rc2 = avc_xperms_audit(ssid, tsid, tclass, requested, + &avd, xpd, xperm, rc, ad); + if (rc2) + return rc2; + return rc; +} + +/** + * avc_perm_nonode - Add an entry to the AVC + * @ssid: subject + * @tsid: object/target + * @tclass: object class + * @requested: requested permissions + * @flags: AVC flags + * @avd: access vector decision + * + * This is the "we have no node" part of avc_has_perm_noaudit(), which is + * unlikely and needs extra stack space for the new node that we generate, so + * don't inline it. + */ +static noinline int avc_perm_nonode(u32 ssid, u32 tsid, u16 tclass, + u32 requested, unsigned int flags, + struct av_decision *avd) +{ + u32 denied; + struct avc_xperms_node xp_node; + + avc_compute_av(ssid, tsid, tclass, avd, &xp_node); + denied = requested & ~(avd->allowed); + if (unlikely(denied)) + return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0, + flags, avd); + return 0; +} /** * avc_has_perm_noaudit - Check permissions but perform no auditing. @@ -711,32 +1143,31 @@ static noinline int avc_denied(u32 ssid, u32 tsid, * should be released for the auditing. 
*/ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, - u16 tclass, u32 requested, - unsigned flags, - struct av_decision *avd) + u16 tclass, u32 requested, + unsigned int flags, + struct av_decision *avd) { - struct avc_node *node; - int rc = 0; u32 denied; + struct avc_node *node; - BUG_ON(!requested); + if (WARN_ON(!requested)) + return -EACCES; rcu_read_lock(); - node = avc_lookup(ssid, tsid, tclass); if (unlikely(!node)) { - node = avc_compute_av(ssid, tsid, tclass, avd); - } else { - memcpy(avd, &node->ae.avd, sizeof(*avd)); - avd = &node->ae.avd; + rcu_read_unlock(); + return avc_perm_nonode(ssid, tsid, tclass, requested, + flags, avd); } + denied = requested & ~node->ae.avd.allowed; + memcpy(avd, &node->ae.avd, sizeof(*avd)); + rcu_read_unlock(); - denied = requested & ~(avd->allowed); if (unlikely(denied)) - rc = avc_denied(ssid, tsid, tclass, requested, flags, avd); - - rcu_read_unlock(); - return rc; + return avc_denied(ssid, tsid, tclass, requested, 0, 0, 0, + flags, avd); + return 0; } /** @@ -746,7 +1177,6 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @auditdata: auxiliary audit data - * @flags: VFS walk flags * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions @@ -756,17 +1186,17 @@ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, * permissions are granted, -%EACCES if any permissions are denied, or * another -errno upon other errors. */ -int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, - u32 requested, struct common_audit_data *auditdata, - unsigned flags) +int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, + u32 requested, struct common_audit_data *auditdata) { struct av_decision avd; int rc, rc2; - rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); + rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, + &avd); - rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, - flags); + rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, + auditdata); if (rc2) return rc2; return rc; @@ -774,24 +1204,5 @@ int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass, u32 avc_policy_seqno(void) { - return avc_cache.latest_notif; -} - -void avc_disable(void) -{ - /* - * If you are looking at this because you have realized that we are - * not destroying the avc_node_cachep it might be easy to fix, but - * I don't know the memory barrier semantics well enough to know. It's - * possible that some other task dereferenced security_ops when - * it still pointed to selinux operations. If that is the case it's - * possible that it is about to use the avc and is about to need the - * avc_node_cachep. I know I could wrap the security.c security_ops call - * in an rcu_lock, but seriously, it's not worth it. Instead I just flush - * the cache and get that memory back. - */ - if (avc_node_cachep) { - avc_flush(); - /* kmem_cache_destroy(avc_node_cachep); */ - } + return selinux_avc.avc_cache.latest_notif; } |
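The comment introduced above for avc_has_extended_perms() ("adds an additional 256 bits of permissions ... the driver field is used to specify which set contains the permission") is easiest to see with ioctls. Below is a minimal userspace sketch, assuming the usual convention that the upper byte of an ioctl command selects the 256-bit set (the "driver") and the lower byte selects the bit within it; the bitmap struct and helpers are illustrative stand-ins rather than the kernel's security_xperm_set()/security_xperm_test(), but the perm >> 5 word index is the same one computed in avc_quick_copy_xperms_decision().

/*
 * Illustrative sketch only, not part of the patch above: one 256-bit
 * (8 x u32) extended-permission bitmap, indexed by the low byte of an
 * ioctl command, with the high byte naming which bitmap ("driver").
 */
#include <stdint.h>
#include <stdio.h>

struct xperms_bitmap {
	uint32_t p[8];			/* 256 bits, one per extended permission */
};

static void xperms_set(struct xperms_bitmap *b, uint8_t perm)
{
	b->p[perm >> 5] |= 1U << (perm & 0x1f);	/* word index, bit offset */
}

static int xperms_test(const struct xperms_bitmap *b, uint8_t perm)
{
	return (b->p[perm >> 5] >> (perm & 0x1f)) & 1;
}

int main(void)
{
	unsigned int cmd = 0x5401;		/* e.g. TCGETS on x86 */
	uint8_t driver = (cmd >> 8) & 0xff;	/* selects which 256-bit set */
	uint8_t xperm = cmd & 0xff;		/* bit within that set */
	struct xperms_bitmap allowed = { { 0 } };

	xperms_set(&allowed, xperm);
	printf("driver=0x%02x xperm=0x%02x allowed=%d\n",
	       driver, xperm, xperms_test(&allowed, xperm));
	return 0;
}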
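On the audit side, the reworked avc_audit_pre_callback() above walks the access vector one bit at a time and prints the matching permission name from secclass_map[], falling back to a raw hex value for any bits it cannot name. A self-contained sketch of that bit-walk, with a made-up permission table standing in for classmap.h:

/* Sketch of the avc_audit_pre_callback() bit-walk; the permission names
 * below are a toy table, not the kernel's generated secclass_map[]. */
#include <stdint.h>
#include <stdio.h>

static const char *const toy_perms[32] = {
	"ioctl", "read", "write", "create", "getattr", "setattr", "lock",
	/* remaining slots intentionally NULL */
};

static void dump_av(uint32_t av)
{
	uint32_t perm = 1;
	int i;

	if (!av) {
		printf(" null\n");
		return;
	}
	printf(" {");
	for (i = 0; i < 32; i++, perm <<= 1) {
		if ((av & perm) && toy_perms[i]) {
			printf(" %s", toy_perms[i]);
			av &= ~perm;
		}
	}
	if (av)
		printf(" 0x%x", av);	/* leftover bits with no name */
	printf(" }\n");
}

int main(void)
{
	dump_av((1U << 1) | (1U << 2) | (1U << 20));	/* "{ read write 0x100000 }" */
	return 0;
}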
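Finally, the new AVC_CACHE_SLOTS is defined as (1 << CONFIG_SECURITY_SELINUX_AVC_HASH_BITS), so both bucket selection and the round-robin reclaim hint can mask with (AVC_CACHE_SLOTS - 1) instead of taking a modulo, as avc_hash() and avc_reclaim_node() do above. A small sketch of that masking, reusing the old ssid/tsid/tclass mix from the removed avc_hash() purely as a stand-in; the patch itself switches to av_hash() from hash.h, whose internals are not shown in this diff:

/* Power-of-two bucket masking, as used by the AVC cache; the hash mix here
 * is the pre-patch one and only serves as an example. */
#include <stdint.h>
#include <stdio.h>

#define CACHE_HASH_BITS	9
#define CACHE_SLOTS	(1U << CACHE_HASH_BITS)		/* 512 buckets */

static uint32_t toy_hash(uint32_t ssid, uint32_t tsid, uint16_t tclass)
{
	return (ssid ^ (tsid << 2) ^ ((uint32_t)tclass << 4)) & (CACHE_SLOTS - 1);
}

int main(void)
{
	uint32_t lru_hint = 0;
	int i;

	printf("bucket for (ssid=1, tsid=2, tclass=6): %u\n", toy_hash(1, 2, 6));

	/* round-robin scan start points, as avc_reclaim_node() derives them */
	for (i = 0; i < 4; i++)
		printf("reclaim scan starts at bucket %u\n",
		       ++lru_hint & (CACHE_SLOTS - 1));
	return 0;
}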
