Diffstat (limited to 'security/integrity/ima')
-rw-r--r--  security/integrity/ima/Kconfig                |  292
-rw-r--r--  security/integrity/ima/Makefile               |   14
-rw-r--r--  security/integrity/ima/ima.h                  |  540
-rw-r--r--  security/integrity/ima/ima_api.c              |  375
-rw-r--r--  security/integrity/ima/ima_appraise.c         |  803
-rw-r--r--  security/integrity/ima/ima_asymmetric_keys.c  |   66
-rw-r--r--  security/integrity/ima/ima_crypto.c           |  853
-rw-r--r--  security/integrity/ima/ima_efi.c              |   78
-rw-r--r--  security/integrity/ima/ima_fs.c               |  412
-rw-r--r--  security/integrity/ima/ima_iint.c             |  138
-rw-r--r--  security/integrity/ima/ima_init.c             |  139
-rw-r--r--  security/integrity/ima/ima_kexec.c            |  298
-rw-r--r--  security/integrity/ima/ima_main.c             | 1242
-rw-r--r--  security/integrity/ima/ima_modsig.c           |  151
-rw-r--r--  security/integrity/ima/ima_mok.c              |   49
-rw-r--r--  security/integrity/ima/ima_policy.c           | 2119
-rw-r--r--  security/integrity/ima/ima_queue.c            |  204
-rw-r--r--  security/integrity/ima/ima_queue_keys.c       |  177
-rw-r--r--  security/integrity/ima/ima_template.c         |  536
-rw-r--r--  security/integrity/ima/ima_template_lib.c     |  764
-rw-r--r--  security/integrity/ima/ima_template_lib.h     |   69
21 files changed, 8444 insertions, 875 deletions
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 39196abaff0d..976e75f9b9ba 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -1,17 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
# IBM Integrity Measurement Architecture
#
config IMA
bool "Integrity Measurement Architecture(IMA)"
- depends on SECURITY
- select INTEGRITY
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
- select CRYPTO_MD5
select CRYPTO_SHA1
- select TCG_TPM if HAS_IOMEM && !UML
+ select CRYPTO_HASH_INFO
+ select SECURITY_PATH
+ select TCG_TPM if HAS_IOMEM
select TCG_TIS if TCG_TPM && X86
- select TCG_IBMVTPM if TCG_TPM && PPC64
+ select TCG_CRB if TCG_TPM && ACPI
+ select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+ select INTEGRITY_AUDIT if AUDIT
help
The Trusted Computing Group(TCG) runtime Integrity
Measurement Architecture(IMA) maintains a list of hash
@@ -24,13 +26,26 @@ config IMA
an aggregate integrity value over this list inside the
TPM hardware, so that the TPM can prove to a third party
whether or not critical system files have been modified.
- Read <http://www.usenix.org/events/sec04/tech/sailer.html>
+ Read <https://www.usenix.org/events/sec04/tech/sailer.html>
to learn more about IMA.
If unsure, say N.
+if IMA
+
+config IMA_KEXEC
+ bool "Enable carrying the IMA measurement list across a soft boot"
+ depends on TCG_TPM && HAVE_IMA_KEXEC
+ default n
+ help
+ TPM PCRs are only reset on a hard reboot. In order to validate
+ a TPM's quote after a soft boot, the IMA measurement list of the
+ running kernel must be saved and restored on boot.
+
+ Depending on the IMA policy, the measurement list can grow to
+ be very large.
+
config IMA_MEASURE_PCR_IDX
int
- depends on IMA
range 8 14
default 10
help
@@ -40,14 +55,94 @@ config IMA_MEASURE_PCR_IDX
config IMA_LSM_RULES
bool
- depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
+ depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
default y
help
Disabling this option will disregard LSM based policy rules.
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames. The configured default template can be replaced
+ by specifying "ima_template=" on the boot command line.
+
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compiled default
+ hash algorithm can be overwritten using the kernel command
+ line 'ima_hash=' option.
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1=y
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256=y
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512=y
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512=y
+
+ config IMA_DEFAULT_HASH_SM3
+ bool "SM3"
+ depends on CRYPTO_SM3_GENERIC=y
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+ default "sm3" if IMA_DEFAULT_HASH_SM3
+
+config IMA_WRITE_POLICY
+ bool "Enable multiple writes to the IMA policy"
+ default n
+ help
+ IMA policy can now be updated multiple times. The new rules get
+ appended to the original policy. Have in mind that the rules are
+ scanned in FIFO order so be careful when you design and add new ones.
+
+ If unsure, say N.
+
+config IMA_READ_POLICY
+ bool "Enable reading back the current IMA policy"
+ default y if IMA_WRITE_POLICY
+ default n if !IMA_WRITE_POLICY
+ help
+ It is often useful to be able to read back the IMA policy. It is
+ even more important after introducing CONFIG_IMA_WRITE_POLICY.
+ This option allows the root user to see the current policy rules.
+
config IMA_APPRAISE
bool "Appraise integrity measurements"
- depends on IMA
default n
help
This option enables local measurement integrity appraisal.
@@ -59,3 +154,182 @@ config IMA_APPRAISE
For more information on integrity appraisal refer to:
<http://linux-ima.sourceforge.net>
If unsure, say N.
+
+config IMA_ARCH_POLICY
+ bool "Enable loading an IMA architecture specific policy"
+ depends on (KEXEC_SIG && IMA) || IMA_APPRAISE \
+ && INTEGRITY_ASYMMETRIC_KEYS
+ default n
+ help
+ This option enables loading an IMA architecture specific policy
+ based on run time secure boot flags.
+
+config IMA_APPRAISE_BUILD_POLICY
+ bool "IMA build time configured policy rules"
+ depends on IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS
+ default n
+ help
+ This option defines an IMA appraisal policy at build time, which
+ is enforced at run time without having to specify a builtin
+ policy name on the boot command line. The build time appraisal
+ policy rules persist after loading a custom policy.
+
+ Depending on the rules configured, this policy may require kernel
+ modules, firmware, the kexec kernel image, and/or the IMA policy
+ to be signed. Unsigned files might prevent the system from
+ booting or applications from working properly.
+
+config IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ bool "Appraise firmware signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ This option defines a policy requiring all firmware to be signed,
+ including the regulatory.db. If both this option and
+ CFG80211_REQUIRE_SIGNED_REGDB are enabled, then both signature
+ verification methods are necessary.
+
+config IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ bool "Appraise kexec kernel image signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kexec'ed kernel images to
+ be signed and verified by a public key on the trusted IMA
+ keyring.
+
+ Kernel image signatures can not be verified by the original
+ kexec_load syscall. Enabling this rule will prevent its
+ usage.
+
+config IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ bool "Appraise kernel modules signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kernel modules to be signed
+ and verified by a public key on the trusted IMA keyring.
+
+ Kernel module signatures can only be verified by IMA-appraisal,
+ via the finit_module syscall. Enabling this rule will prevent
+ the usage of the init_module syscall.
+
+config IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ bool "Appraise IMA policy signature"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require the IMA policy to be signed and
+ verified by a key on the trusted IMA keyring.
+
+config IMA_APPRAISE_BOOTPARAM
+ bool "ima_appraise boot parameter"
+ depends on IMA_APPRAISE
+ default y
+ help
+ This option enables the different "ima_appraise=" modes
+ (eg. fix, log) from the boot command line.
+
+config IMA_APPRAISE_MODSIG
+ bool "Support module-style signatures for appraisal"
+ depends on IMA_APPRAISE
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select PKCS7_MESSAGE_PARSER
+ select MODULE_SIG_FORMAT
+ default n
+ help
+ Adds support for signatures appended to files. The format of the
+ appended signature is the same used for signed kernel modules.
+ The modsig keyword can be used in the IMA policy to allow a hook
+ to accept such signatures.
+
+config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ bool "Permit keys validly signed by a built-in, machine (if configured) or secondary"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on SECONDARY_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ Keys may be added to the IMA or IMA blacklist keyrings, if the
+ key is validly signed by a CA cert in the system built-in,
+ machine (if configured), or secondary trusted keyrings. The
+ key must also have the digitalSignature usage set.
+
+ Intermediate keys between those the kernel has compiled in and the
+ IMA keys to be added may be added to the system secondary keyring,
+ provided they are validly signed by a key already resident in the
+ built-in, machine (if configured) or secondary trusted keyrings.
+
+config IMA_BLACKLIST_KEYRING
+ bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ This option creates an IMA blacklist keyring, which contains all
+ revoked IMA keys. It is consulted before any other keyring. If
+ the search is successful the requested operation is rejected and
+ an error is returned to the caller.
+
+config IMA_LOAD_X509
+ bool "Load X509 certificate onto the '.ima' trusted keyring"
+ depends on INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ File signature verification is based on the public keys
+ loaded on the .ima trusted keyring. These public keys are
+ X509 certificates signed by a trusted key on the
+ .system keyring. This option enables X509 certificate
+ loading from the kernel onto the '.ima' trusted keyring.
+
+config IMA_X509_PATH
+ string "IMA X509 certificate path"
+ depends on IMA_LOAD_X509
+ default "/etc/keys/x509_ima.der"
+ help
+ This option defines IMA X509 certificate path.
+
+config IMA_APPRAISE_SIGNED_INIT
+ bool "Require signed user-space initialization"
+ depends on IMA_LOAD_X509
+ default n
+ help
+ This option requires user-space init to be signed.
+
+config IMA_MEASURE_ASYMMETRIC_KEYS
+ bool
+ depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+ default y
+
+config IMA_QUEUE_EARLY_BOOT_KEYS
+ bool
+ depends on IMA_MEASURE_ASYMMETRIC_KEYS
+ depends on SYSTEM_TRUSTED_KEYRING
+ default y
+
+config IMA_SECURE_AND_OR_TRUSTED_BOOT
+ bool
+ depends on IMA_ARCH_POLICY
+ help
+ This option is selected by architectures to enable secure and/or
+ trusted boot based on IMA runtime policies.
+
+config IMA_DISABLE_HTABLE
+ bool "Disable htable to allow measurement of duplicate records"
+ default n
+ help
+ This option disables htable to allow measurement of duplicate records.
+
+config IMA_KEXEC_EXTRA_MEMORY_KB
+ int "Extra memory for IMA measurements added during kexec soft reboot"
+ range 0 40
+ depends on IMA_KEXEC
+ default 0
+ help
+ IMA_KEXEC_EXTRA_MEMORY_KB determines the extra memory to be
+ allocated (in kb) for IMA measurements added during kexec soft reboot.
+ If set to the default value of 0, an extra half page of memory for those
+ additional measurements will be allocated.
+
+endif
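
[Editor's note] The Kconfig help texts above name three boot-time overrides for the new build-time defaults: "ima_template=", "ima_hash=" and "ima_appraise=". As a minimal userspace sketch (not kernel code), mapping a hash name such as the CONFIG_IMA_DEFAULT_HASH string to an index could look like the following; the table only lists the algorithms offered by the Kconfig choice above, and all identifiers prefixed with demo_ are made up for illustration.

/* Hypothetical sketch: map a hash name (e.g. from "ima_hash=" or the
 * CONFIG_IMA_DEFAULT_HASH string) to an index, falling back to the
 * compiled-in default when the name is unknown.  Illustrative only. */
#include <stdio.h>
#include <string.h>

static const char *const demo_hash_names[] = {
	"sha1", "sha256", "sha512", "wp512", "sm3",
};

static int demo_hash_lookup(const char *name, int dflt)
{
	size_t i;

	for (i = 0; i < sizeof(demo_hash_names) / sizeof(demo_hash_names[0]); i++)
		if (strcmp(name, demo_hash_names[i]) == 0)
			return (int)i;
	return dflt;	/* unknown name: keep the build-time default */
}

int main(void)
{
	printf("ima_hash=sha256 -> index %d\n", demo_hash_lookup("sha256", 0));
	printf("ima_hash=md4    -> index %d\n", demo_hash_lookup("md4", 0));
	return 0;
}

On a running system the same defaults can be overridden at boot, e.g. "ima_hash=sha256 ima_template=ima-sig ima_appraise=fix" on the kernel command line; all three parameters are named in the help texts above.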
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 56dfee7cbf61..b376d38b4ee6 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -1,10 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for building Trusted Computing Group's(TCG) runtime Integrity
# Measurement Architecture(IMA).
#
-obj-$(CONFIG_IMA) += ima.o
+obj-$(CONFIG_IMA) += ima.o ima_iint.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o
+ ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_IMA_APPRAISE_MODSIG) += ima_modsig.o
+ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
+ima-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
+ima-$(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) += ima_asymmetric_keys.o
+ima-$(CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS) += ima_queue_keys.o
+
+ifeq ($(CONFIG_EFI),y)
+ima-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_efi.o
+endif
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index b3dd616560f7..e3d71d8d56e3 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
@@ -5,11 +6,6 @@
* Reiner Sailer <sailer@watson.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
* File: ima.h
* internal Integrity Measurement Architecture (IMA) definitions
*/
@@ -19,40 +15,100 @@
#include <linux/types.h>
#include <linux/crypto.h>
+#include <linux/fs.h>
#include <linux/security.h>
#include <linux/hash.h>
#include <linux/tpm.h>
#include <linux/audit.h>
+#include <crypto/hash_info.h>
#include "../integrity.h"
-enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
/* digest size for IMA, fits SHA1 or MD5 */
#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
-#define IMA_HASH_BITS 9
+#define IMA_HASH_BITS 10
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
+#define NR_BANKS(chip) ((chip != NULL) ? chip->nr_allocated_banks : 0)
+
+/* current content of the policy */
+extern int ima_policy_flag;
+
+/* bitset of digests algorithms allowed in the setxattr hook */
+extern atomic_t ima_setxattr_allowed_hash_algorithms;
+
+/* IMA hash algorithm description */
+struct ima_algo_desc {
+ struct crypto_shash *tfm;
+ enum hash_algo algo;
+};
+
/* set during initialization */
-extern int ima_initialized;
-extern int ima_used_chip;
-extern char *ima_hash;
+extern int ima_hash_algo __ro_after_init;
+extern int ima_sha1_idx __ro_after_init;
+extern int ima_hash_algo_idx __ro_after_init;
+extern int ima_extra_slots __ro_after_init;
+extern struct ima_algo_desc *ima_algo_array __ro_after_init;
+
extern int ima_appraise;
+extern struct tpm_chip *ima_tpm_chip;
+extern const char boot_aggregate_name[];
+
+/* IMA event related data */
+struct ima_event_data {
+ struct ima_iint_cache *iint;
+ struct file *file;
+ const unsigned char *filename;
+ struct evm_ima_xattr_data *xattr_value;
+ int xattr_len;
+ const struct modsig *modsig;
+ const char *violation;
+ const void *buf;
+ int buf_len;
+};
+
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
+
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init)(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+ void (*field_show)(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
-/* IMA inode template definition */
-struct ima_template_data {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */
- char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ struct list_head list;
+ char *name;
+ char *fmt;
+ int num_fields;
+ const struct ima_template_field **fields;
};
struct ima_template_entry {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
- const char *template_name;
- int template_len;
- struct ima_template_data template;
+ int pcr;
+ struct tpm_digest *digests;
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[]; /* template related data */
};
struct ima_queue_entry {
@@ -62,20 +118,179 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
+/* Some details preceding the binary serialized measurement list */
+struct ima_kexec_hdr {
+ u16 version;
+ u16 _reserved0;
+ u32 _reserved1;
+ u64 buffer_size;
+ u64 count;
+};
+
+/* IMA iint action cache flags */
+#define IMA_MEASURE 0x00000001
+#define IMA_MEASURED 0x00000002
+#define IMA_APPRAISE 0x00000004
+#define IMA_APPRAISED 0x00000008
+/*#define IMA_COLLECT 0x00000010 do not use this flag */
+#define IMA_COLLECTED 0x00000020
+#define IMA_AUDIT 0x00000040
+#define IMA_AUDITED 0x00000080
+#define IMA_HASH 0x00000100
+#define IMA_HASHED 0x00000200
+
+/* IMA iint policy rule cache flags */
+#define IMA_NONACTION_FLAGS 0xff000000
+#define IMA_DIGSIG_REQUIRED 0x01000000
+#define IMA_PERMIT_DIRECTIO 0x02000000
+#define IMA_NEW_FILE 0x04000000
+#define IMA_FAIL_UNVERIFIABLE_SIGS 0x10000000
+#define IMA_MODSIG_ALLOWED 0x20000000
+#define IMA_CHECK_BLACKLIST 0x40000000
+#define IMA_VERITY_REQUIRED 0x80000000
+
+/* Exclude non-action flags which are not rule-specific. */
+#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE)
+
+#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_HASH | IMA_APPRAISE_SUBMASK)
+#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+ IMA_HASHED | IMA_COLLECTED | \
+ IMA_APPRAISED_SUBMASK)
+
+/* IMA iint subaction appraise cache flags */
+#define IMA_FILE_APPRAISE 0x00001000
+#define IMA_FILE_APPRAISED 0x00002000
+#define IMA_MMAP_APPRAISE 0x00004000
+#define IMA_MMAP_APPRAISED 0x00008000
+#define IMA_BPRM_APPRAISE 0x00010000
+#define IMA_BPRM_APPRAISED 0x00020000
+#define IMA_READ_APPRAISE 0x00040000
+#define IMA_READ_APPRAISED 0x00080000
+#define IMA_CREDS_APPRAISE 0x00100000
+#define IMA_CREDS_APPRAISED 0x00200000
+#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
+ IMA_BPRM_APPRAISE | IMA_READ_APPRAISE | \
+ IMA_CREDS_APPRAISE)
+#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+ IMA_BPRM_APPRAISED | IMA_READ_APPRAISED | \
+ IMA_CREDS_APPRAISED)
+
+/* IMA iint cache atomic_flags */
+#define IMA_CHANGE_XATTR 0
+#define IMA_UPDATE_XATTR 1
+#define IMA_CHANGE_ATTR 2
+#define IMA_DIGSIG 3
+#define IMA_MAY_EMIT_TOMTOU 4
+#define IMA_EMITTED_OPENWRITERS 5
+
+/* IMA integrity metadata associated with an inode */
+struct ima_iint_cache {
+ struct mutex mutex; /* protects: version, flags, digest */
+ struct integrity_inode_attributes real_inode;
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+ enum integrity_status ima_read_status:4;
+ enum integrity_status ima_creds_status:4;
+ struct ima_digest_data *ima_hash;
+};
+
+extern struct lsm_blob_sizes ima_blob_sizes;
+
+static inline struct ima_iint_cache *
+ima_inode_get_iint(const struct inode *inode)
+{
+ struct ima_iint_cache **iint_sec;
+
+ if (unlikely(!inode->i_security))
+ return NULL;
+
+ iint_sec = inode->i_security + ima_blob_sizes.lbs_inode;
+ return *iint_sec;
+}
+
+static inline void ima_inode_set_iint(const struct inode *inode,
+ struct ima_iint_cache *iint)
+{
+ struct ima_iint_cache **iint_sec;
+
+ if (unlikely(!inode->i_security))
+ return;
+
+ iint_sec = inode->i_security + ima_blob_sizes.lbs_inode;
+ *iint_sec = iint;
+}
+
+struct ima_iint_cache *ima_iint_find(struct inode *inode);
+struct ima_iint_cache *ima_inode_get(struct inode *inode);
+void ima_inode_free_rcu(void *inode_security);
+void __init ima_iintcache_init(void);
+
+extern const int read_idmap[];
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+void ima_load_kexec_buffer(void);
+#else
+static inline void ima_load_kexec_buffer(void) {}
+#endif /* CONFIG_HAVE_IMA_KEXEC */
+
+#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
+void ima_post_key_create_or_update(struct key *keyring, struct key *key,
+ const void *payload, size_t plen,
+ unsigned long flags, bool create);
+#endif
+
+#ifdef CONFIG_IMA_KEXEC
+void ima_measure_kexec_event(const char *event_name);
+#else
+static inline void ima_measure_kexec_event(const char *event_name) {}
+#endif
+
+/*
+ * The default binary_runtime_measurements list format is defined as the
+ * platform native format. The canonical format is defined as little-endian.
+ */
+extern bool ima_canonical_fmt;
+
/* Internal IMA function definitions */
int ima_init(void);
-void ima_cleanup(void);
int ima_fs_init(void);
-void ima_fs_cleanup(void);
-int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode);
-int ima_calc_file_hash(struct file *file, char *digest);
-int ima_calc_buffer_hash(const void *data, int len, char *digest);
-int ima_calc_boot_aggregate(char *digest);
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
- const char *op, const char *cause);
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_buffer_hash(const void *buf, loff_t len,
+ struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_entry *entry);
+int ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct ima_iint_cache *iint, const char *op,
+ const char *cause);
int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields);
+struct ima_template_desc *ima_template_desc_current(void);
+struct ima_template_desc *ima_template_desc_buf(void);
+struct ima_template_desc *lookup_template_desc(const char *name);
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template);
+int ima_restore_measurement_entry(struct ima_template_entry *entry);
+int ima_restore_measurement_list(loff_t bufsize, void *buf);
+int ima_measurements_show(struct seq_file *m, void *v);
+unsigned long ima_get_binary_runtime_size(void);
+int ima_init_template(void);
+void ima_init_template_list(void);
+int __init ima_init_digests(void);
+void __init ima_init_reboot_notifier(void);
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data);
/*
* used to protect h_table and sha_table
@@ -89,100 +304,279 @@ struct ima_h_table {
};
extern struct ima_h_table ima_htable;
-static inline unsigned long ima_hash_key(u8 *digest)
+static inline unsigned int ima_hash_key(u8 *digest)
{
- return hash_long(*digest, IMA_HASH_BITS);
+ /* there is no point in taking a hash of part of a digest */
+ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}
+#define __ima_hooks(hook) \
+ hook(NONE, none) \
+ hook(FILE_CHECK, file) \
+ hook(MMAP_CHECK, mmap) \
+ hook(MMAP_CHECK_REQPROT, mmap_reqprot) \
+ hook(BPRM_CHECK, bprm) \
+ hook(CREDS_CHECK, creds) \
+ hook(POST_SETATTR, post_setattr) \
+ hook(MODULE_CHECK, module) \
+ hook(FIRMWARE_CHECK, firmware) \
+ hook(KEXEC_KERNEL_CHECK, kexec_kernel) \
+ hook(KEXEC_INITRAMFS_CHECK, kexec_initramfs) \
+ hook(POLICY_CHECK, policy) \
+ hook(KEXEC_CMDLINE, kexec_cmdline) \
+ hook(KEY_CHECK, key) \
+ hook(CRITICAL_DATA, critical_data) \
+ hook(SETXATTR_CHECK, setxattr_check) \
+ hook(MAX_CHECK, none)
+
+#define __ima_hook_enumify(ENUM, str) ENUM,
+#define __ima_stringify(arg) (#arg)
+#define __ima_hook_measuring_stringify(ENUM, str) \
+ (__ima_stringify(measuring_ ##str)),
+
+enum ima_hooks {
+ __ima_hooks(__ima_hook_enumify)
+};
+
+static const char * const ima_hooks_measure_str[] = {
+ __ima_hooks(__ima_hook_measuring_stringify)
+};
+
+static inline const char *func_measure_str(enum ima_hooks func)
+{
+ if (func >= MAX_CHECK)
+ return ima_hooks_measure_str[NONE];
+
+ return ima_hooks_measure_str[func];
+}
+
+extern const char *const func_tokens[];
+
+struct modsig;
+
+#ifdef CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS
+/*
+ * To track keys that need to be measured.
+ */
+struct ima_key_entry {
+ struct list_head list;
+ void *payload;
+ size_t payload_len;
+ char *keyring_name;
+};
+void ima_init_key_queue(void);
+bool ima_should_queue_key(void);
+bool ima_queue_key(struct key *keyring, const void *payload,
+ size_t payload_len);
+void ima_process_queued_keys(void);
+#else
+static inline void ima_init_key_queue(void) {}
+static inline bool ima_should_queue_key(void) { return false; }
+static inline bool ima_queue_key(struct key *keyring,
+ const void *payload,
+ size_t payload_len) { return false; }
+static inline void ima_process_queued_keys(void) {}
+#endif /* CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS */
+
/* LIM API function definitions */
-int ima_get_action(struct inode *inode, int mask, int function);
-int ima_must_measure(struct inode *inode, int mask, int function);
-int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file);
-void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
- const unsigned char *filename);
-void ima_audit_measurement(struct integrity_iint_cache *iint,
+int ima_get_action(struct mnt_idmap *idmap, struct inode *inode,
+ const struct cred *cred, struct lsm_prop *prop, int mask,
+ enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos);
+int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
+int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file,
+ void *buf, loff_t size, enum hash_algo algo,
+ struct modsig *modsig);
+void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig, int pcr,
+ struct ima_template_desc *template_desc);
+int process_buffer_measurement(struct mnt_idmap *idmap,
+ struct inode *inode, const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr, const char *func_data,
+ bool buf_hash, u8 *digest, size_t digest_len);
+void ima_audit_measurement(struct ima_iint_cache *iint,
const unsigned char *filename);
+int ima_alloc_init_template(struct ima_event_data *event_data,
+ struct ima_template_entry **entry,
+ struct ima_template_desc *template_desc);
int ima_store_template(struct ima_template_entry *entry, int violation,
- struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
-const char *ima_d_path(struct path *path, char **pathbuf);
-
-/* rbtree tree calls to lookup, insert, delete
- * integrity data associated with an inode.
- */
-struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
-struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+ struct inode *inode,
+ const unsigned char *filename, int pcr);
+void ima_free_template_entry(struct ima_template_entry *entry);
+const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
/* IMA policy related functions */
-enum ima_hooks { FILE_CHECK = 1, MMAP_CHECK, BPRM_CHECK, MODULE_CHECK, POST_SETATTR };
-
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
- int flags);
+int ima_match_policy(struct mnt_idmap *idmap, struct inode *inode,
+ const struct cred *cred, struct lsm_prop *prop,
+ enum ima_hooks func, int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos);
void ima_init_policy(void);
void ima_update_policy(void);
+void ima_update_policy_flags(void);
ssize_t ima_parse_add_rule(char *);
void ima_delete_rules(void);
+int ima_check_policy(void);
+void *ima_policy_start(struct seq_file *m, loff_t *pos);
+void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos);
+void ima_policy_stop(struct seq_file *m, void *v);
+int ima_policy_show(struct seq_file *m, void *v);
/* Appraise integrity measurements */
#define IMA_APPRAISE_ENFORCE 0x01
#define IMA_APPRAISE_FIX 0x02
-#define IMA_APPRAISE_MODULES 0x04
+#define IMA_APPRAISE_LOG 0x04
+#define IMA_APPRAISE_MODULES 0x08
+#define IMA_APPRAISE_FIRMWARE 0x10
+#define IMA_APPRAISE_POLICY 0x20
+#define IMA_APPRAISE_KEXEC 0x40
#ifdef CONFIG_IMA_APPRAISE
-int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename);
-int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
-void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
-enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
- int func);
+int ima_check_blacklist(struct ima_iint_cache *iint,
+ const struct modsig *modsig, int pcr);
+int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig);
+int ima_must_appraise(struct mnt_idmap *idmap, struct inode *inode,
+ int mask, enum ima_hooks func);
+void ima_update_xattr(struct ima_iint_cache *iint, struct file *file);
+enum integrity_status ima_get_cache_status(struct ima_iint_cache *iint,
+ enum ima_hooks func);
+enum hash_algo ima_get_hash_algo(const struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value, int xattr_len);
+void __init init_ima_appraise_lsm(const struct lsm_id *lsmid);
#else
-static inline int ima_appraise_measurement(int func,
- struct integrity_iint_cache *iint,
+static inline int ima_check_blacklist(struct ima_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ return 0;
+}
+
+static inline int ima_appraise_measurement(enum ima_hooks func,
+ struct ima_iint_cache *iint,
struct file *file,
- const unsigned char *filename)
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len,
+ const struct modsig *modsig)
{
return INTEGRITY_UNKNOWN;
}
-static inline int ima_must_appraise(struct inode *inode, int mask,
+static inline int ima_must_appraise(struct mnt_idmap *idmap,
+ struct inode *inode, int mask,
enum ima_hooks func)
{
return 0;
}
-static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+static inline void ima_update_xattr(struct ima_iint_cache *iint,
struct file *file)
{
}
-static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache
- *iint, int func)
+static inline enum integrity_status
+ima_get_cache_status(struct ima_iint_cache *iint, enum ima_hooks func)
{
return INTEGRITY_UNKNOWN;
}
-#endif
+
+static inline enum hash_algo
+ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len)
+{
+ return ima_hash_algo;
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value,
+ int xattr_len)
+{
+ return 0;
+}
+
+static inline void __init init_ima_appraise_lsm(const struct lsm_id *lsmid)
+{
+}
+
+#endif /* CONFIG_IMA_APPRAISE */
+
+#ifdef CONFIG_IMA_APPRAISE_MODSIG
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig);
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size);
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size);
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len);
+void ima_free_modsig(struct modsig *modsig);
+#else
+static inline int ima_read_modsig(enum ima_hooks func, const void *buf,
+ loff_t buf_len, struct modsig **modsig)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_collect_modsig(struct modsig *modsig, const void *buf,
+ loff_t size)
+{
+}
+
+static inline int ima_get_modsig_digest(const struct modsig *modsig,
+ enum hash_algo *algo, const u8 **digest,
+ u32 *digest_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ima_get_raw_modsig(const struct modsig *modsig,
+ const void **data, u32 *data_len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ima_free_modsig(struct modsig *modsig)
+{
+}
+#endif /* CONFIG_IMA_APPRAISE_MODSIG */
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
-#define security_filter_rule_init security_audit_rule_init
-#define security_filter_rule_match security_audit_rule_match
+#define ima_filter_rule_init security_audit_rule_init
+#define ima_filter_rule_free security_audit_rule_free
+#define ima_filter_rule_match security_audit_rule_match
#else
-static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
+ void **lsmrule, gfp_t gfp)
{
return -EINVAL;
}
-static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
- void *lsmrule,
- struct audit_context *actx)
+static inline void ima_filter_rule_free(void *lsmrule)
+{
+}
+
+static inline int ima_filter_rule_match(struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule)
{
return -EINVAL;
}
#endif /* CONFIG_IMA_LSM_RULES */
-#endif
+
+#ifdef CONFIG_IMA_READ_POLICY
+#define POLICY_FILE_FLAGS (S_IWUSR | S_IRUSR)
+#else
+#define POLICY_FILE_FLAGS S_IWUSR
+#endif /* CONFIG_IMA_READ_POLICY */
+
+#endif /* __LINUX_IMA_H */
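
[Editor's note] One detail worth calling out from the header above: ima_hash_key() no longer runs hash_long() over part of the digest. With IMA_HASH_BITS raised to 10, the bucket is taken directly from the first two digest bytes, since a cryptographic digest is already uniformly distributed. The following is a standalone rendering of that computation; the sample digest is made up.

/* Standalone rendering of the new ima_hash_key() computation shown in
 * the header above ("there is no point in taking a hash of part of a
 * digest").  The sample digest below is made up for illustration. */
#include <stdio.h>

#define IMA_HASH_BITS		10
#define IMA_MEASURE_HTABLE_SIZE	(1 << IMA_HASH_BITS)

static unsigned int demo_ima_hash_key(const unsigned char *digest)
{
	return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
}

int main(void)
{
	unsigned char digest[20] = { 0xde, 0xad, 0xbe, 0xef };	/* sample only */

	printf("bucket = %u of %u\n", demo_ima_hash_key(digest),
	       IMA_MEASURE_HTABLE_SIZE);
	return 0;
}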
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 1c03e8f1e0e1..c35ea613c9f8 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -1,26 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 IBM Corporation
*
* Author: Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
* File: ima_api.c
* Implements must_appraise_or_measure, collect_measurement,
* appraise_measurement, store_measurement and store_template.
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/evm.h>
+#include <linux/fsverity.h>
+
#include "ima.h"
-static const char *IMA_TEMPLATE_NAME = "ima";
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < entry->template_desc->num_fields; i++)
+ kfree(entry->template_data[i].data);
+
+ kfree(entry->digests);
+ kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct ima_event_data *event_data,
+ struct ima_template_entry **entry,
+ struct ima_template_desc *desc)
+{
+ struct ima_template_desc *template_desc;
+ struct tpm_digest *digests;
+ int i, result = 0;
+
+ if (desc)
+ template_desc = desc;
+ else
+ template_desc = ima_template_desc_current();
+
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*digests), GFP_NOFS);
+ if (!digests) {
+ kfree(*entry);
+ *entry = NULL;
+ return -ENOMEM;
+ }
+
+ (*entry)->digests = digests;
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ const struct ima_template_field *field =
+ template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(event_data,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ return 0;
+out:
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ return result;
+}
/*
* ima_store_template - store ima template measurements
@@ -39,28 +100,26 @@ static const char *IMA_TEMPLATE_NAME = "ima";
* Returns 0 on success, error code otherwise
*/
int ima_store_template(struct ima_template_entry *entry,
- int violation, struct inode *inode)
+ int violation, struct inode *inode,
+ const unsigned char *filename, int pcr)
{
- const char *op = "add_template_measure";
- const char *audit_cause = "hashing_error";
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "hashing_error";
+ char *template_name = entry->template_desc->name;
int result;
- memset(entry->digest, 0, sizeof(entry->digest));
- entry->template_name = IMA_TEMPLATE_NAME;
- entry->template_len = sizeof(entry->template);
-
if (!violation) {
- result = ima_calc_buffer_hash(&entry->template,
- entry->template_len,
- entry->digest);
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ entry);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template_name, op,
+ template_name, op,
audit_cause, result, 0);
return result;
}
}
- result = ima_add_template_entry(entry, violation, op, inode);
+ entry->pcr = pcr;
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
return result;
}
@@ -71,26 +130,31 @@ int ima_store_template(struct ima_template_entry *entry,
* By extending the PCR with 0xFF's instead of with zeroes, the PCR
* value is invalidated.
*/
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
- const char *op, const char *cause)
+void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct ima_iint_cache *iint, const char *op,
+ const char *cause)
{
struct ima_template_entry *entry;
+ struct inode *inode = file_inode(file);
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .violation = cause };
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
+ if (result < 0) {
result = -ENOMEM;
goto err_out;
}
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode,
+ filename, CONFIG_IMA_MEASURE_PCR_IDX);
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, cause, result, 0);
@@ -98,33 +162,70 @@ err_out:
/**
* ima_get_action - appraise & measure decision based on policy.
- * @inode: pointer to inode to measure
- * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK)
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: pointer to the inode associated with the object being validated
+ * @cred: pointer to credentials structure to validate
+ * @prop: properties of the task being validated
+ * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC,
+ * MAY_APPEND)
+ * @func: caller identifier
+ * @pcr: pointer filled in if matched measure policy sets pcr=
+ * @template_desc: pointer filled in if matched measure policy sets template=
+ * @func_data: func specific data, may be NULL
+ * @allowed_algos: allowlist of hash algorithms for the IMA xattr
*
* The policy is defined in terms of keypairs:
- * subj=, obj=, type=, func=, mask=, fsmagic=
+ * subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
- * func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK
- * mask: contains the permission mask
+ * func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK
+ * | KEXEC_CMDLINE | KEY_CHECK | CRITICAL_DATA | SETXATTR_CHECK
+ * | MMAP_CHECK_REQPROT
+ * mask: contains the permission mask
* fsmagic: hex value
*
* Returns IMA_MEASURE, IMA_APPRAISE mask.
*
*/
-int ima_get_action(struct inode *inode, int mask, int function)
+int ima_get_action(struct mnt_idmap *idmap, struct inode *inode,
+ const struct cred *cred, struct lsm_prop *prop, int mask,
+ enum ima_hooks func, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos)
{
- int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
- if (!ima_appraise)
- flags &= ~IMA_APPRAISE;
+ flags &= ima_policy_flag;
- return ima_match_policy(inode, function, mask, flags);
+ return ima_match_policy(idmap, inode, cred, prop, func, mask,
+ flags, pcr, template_desc, func_data,
+ allowed_algos);
}
-int ima_must_measure(struct inode *inode, int mask, int function)
+static bool ima_get_verity_digest(struct ima_iint_cache *iint,
+ struct inode *inode,
+ struct ima_max_digest_data *hash)
{
- return ima_match_policy(inode, function, mask, IMA_MEASURE);
+ enum hash_algo alg;
+ int digest_len;
+
+ /*
+ * On failure, 'measure' policy rules will result in a file data
+ * hash containing 0's.
+ */
+ digest_len = fsverity_get_digest(inode, hash->digest, NULL, &alg);
+ if (digest_len == 0)
+ return false;
+
+ /*
+ * Unlike in the case of actually calculating the file hash, in
+ * the fsverity case regardless of the hash algorithm, return
+ * the verity digest to be included in the measurement list. A
+ * mismatch between the verity algorithm and the xattr signature
+ * algorithm, if one exists, will be detected later.
+ */
+ hash->hdr.algo = alg;
+ hash->hdr.length = digest_len;
+ return true;
}
/*
@@ -137,27 +238,95 @@ int ima_must_measure(struct inode *inode, int mask, int function)
*
* Return 0 on success, error code otherwise
*/
-int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file)
+int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file,
+ void *buf, loff_t size, enum hash_algo algo,
+ struct modsig *modsig)
{
+ const char *audit_cause = "failed";
struct inode *inode = file_inode(file);
- const char *filename = file->f_dentry->d_name.name;
+ struct inode *real_inode = d_real_inode(file_dentry(file));
+ struct ima_max_digest_data hash;
+ struct ima_digest_data *hash_hdr = container_of(&hash.hdr,
+ struct ima_digest_data, hdr);
+ struct name_snapshot filename;
+ struct kstat stat;
int result = 0;
+ int length;
+ void *tmpbuf;
+ u64 i_version = 0;
- if (!(iint->flags & IMA_COLLECTED)) {
- u64 i_version = file_inode(file)->i_version;
+ /*
+ * Always collect the modsig, because IMA might have already collected
+ * the file digest without collecting the modsig in a previous
+ * measurement rule.
+ */
+ if (modsig)
+ ima_collect_modsig(modsig, buf, size);
- iint->ima_xattr.type = IMA_XATTR_DIGEST;
- result = ima_calc_file_hash(file, iint->ima_xattr.digest);
- if (!result) {
- iint->version = i_version;
- iint->flags |= IMA_COLLECTED;
+ if (iint->flags & IMA_COLLECTED)
+ goto out;
+
+ /*
+ * Detecting file change is based on i_version. On filesystems
+ * which do not support i_version, support was originally limited
+ * to an initial measurement/appraisal/audit, but was modified to
+ * assume the file changed.
+ */
+ result = vfs_getattr_nosec(&file->f_path, &stat, STATX_CHANGE_COOKIE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!result && (stat.result_mask & STATX_CHANGE_COOKIE))
+ i_version = stat.change_cookie;
+ hash.hdr.algo = algo;
+ hash.hdr.length = hash_digest_size[algo];
+
+ /* Initialize hash digest to 0's in case of failure */
+ memset(&hash.digest, 0, sizeof(hash.digest));
+
+ if (iint->flags & IMA_VERITY_REQUIRED) {
+ if (!ima_get_verity_digest(iint, inode, &hash)) {
+ audit_cause = "no-verity-digest";
+ result = -ENODATA;
}
+ } else if (buf) {
+ result = ima_calc_buffer_hash(buf, size, hash_hdr);
+ } else {
+ result = ima_calc_file_hash(file, hash_hdr);
+ }
+
+ if (result && result != -EBADF && result != -EINVAL)
+ goto out;
+
+ length = sizeof(hash.hdr) + hash.hdr.length;
+ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
+ if (!tmpbuf) {
+ result = -ENOMEM;
+ goto out;
}
- if (result)
+
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ if (real_inode == inode)
+ iint->real_inode.version = i_version;
+ else
+ integrity_inode_attrs_store(&iint->real_inode, i_version,
+ real_inode);
+
+ /* Possibly temporary failure due to type of read (eg. O_DIRECT) */
+ if (!result)
+ iint->flags |= IMA_COLLECTED;
+out:
+ if (result) {
+ if (file->f_flags & O_DIRECT)
+ audit_cause = "failed(directio)";
+
+ take_dentry_name_snapshot(&filename, file->f_path.dentry);
+
integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
- filename, "collect_data", "failed",
- result, 0);
+ filename.name.name, "collect_data",
+ audit_cause, result, 0);
+
+ release_dentry_name_snapshot(&filename);
+ }
return result;
}
@@ -169,88 +338,126 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
*
* We only get here if the inode has not already been measured,
* but the measurement could already exist:
- * - multiple copies of the same file on either the same or
+ * - multiple copies of the same file on either the same or
* different filesystems.
* - the inode was previously flushed as well as the iint info,
* containing the hashing info.
*
* Must be called with iint->mutex held.
*/
-void ima_store_measurement(struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig, int pcr,
+ struct ima_template_desc *template_desc)
{
- const char *op = "add_template_measure";
- const char *audit_cause = "ENOMEM";
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "ENOMEM";
int result = -ENOMEM;
struct inode *inode = file_inode(file);
struct ima_template_entry *entry;
+ struct ima_event_data event_data = { .iint = iint,
+ .file = file,
+ .filename = filename,
+ .xattr_value = xattr_value,
+ .xattr_len = xattr_len,
+ .modsig = modsig };
int violation = 0;
- if (iint->flags & IMA_MEASURED)
+ /*
+ * We still need to store the measurement in the case of MODSIG because
+ * we only have its contents to put in the list at the time of
+ * appraisal, but a file measurement from earlier might already exist in
+ * the measurement list.
+ */
+ if (iint->measured_pcrs & (0x1 << pcr) && !modsig)
return;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(&event_data, &entry, template_desc);
+ if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
return;
}
- memset(&entry->template, 0, sizeof(entry->template));
- memcpy(entry->template.digest, iint->ima_xattr.digest, IMA_DIGEST_SIZE);
- strcpy(entry->template.file_name,
- (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ?
- file->f_dentry->d_name.name : filename);
-
- result = ima_store_template(entry, violation, inode);
- if (!result || result == -EEXIST)
+
+ result = ima_store_template(entry, violation, inode, filename, pcr);
+ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
iint->flags |= IMA_MEASURED;
+ iint->measured_pcrs |= (0x1 << pcr);
+ }
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
}
-void ima_audit_measurement(struct integrity_iint_cache *iint,
+void ima_audit_measurement(struct ima_iint_cache *iint,
const unsigned char *filename)
{
struct audit_buffer *ab;
- char hash[(IMA_DIGEST_SIZE * 2) + 1];
+ char *hash;
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
int i;
if (iint->flags & IMA_AUDITED)
return;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
- hex_byte_pack(hash + (i * 2), iint->ima_xattr.digest[i]);
+ hash = kzalloc((iint->ima_hash->length * 2) + 1, GFP_KERNEL);
+ if (!hash)
+ return;
+
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
hash[i * 2] = '\0';
- ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ ab = audit_log_start(audit_context(), GFP_KERNEL,
AUDIT_INTEGRITY_RULE);
if (!ab)
- return;
+ goto out;
audit_log_format(ab, "file=");
audit_log_untrustedstring(ab, filename);
- audit_log_format(ab, " hash=");
- audit_log_untrustedstring(ab, hash);
+ audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash);
- audit_log_task_info(ab, current);
+ audit_log_task_info(ab);
audit_log_end(ab);
iint->flags |= IMA_AUDITED;
+out:
+ kfree(hash);
+ return;
}
-const char *ima_d_path(struct path *path, char **pathbuf)
+/*
+ * ima_d_path - return a pointer to the full pathname
+ *
+ * Attempt to return a pointer to the full pathname for use in the
+ * IMA measurement list, IMA audit records, and auditing logs.
+ *
+ * On failure, return a pointer to a copy of the filename, not dname.
+ * Returning a pointer to dname, could result in using the pointer
+ * after the memory has been freed.
+ */
+const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
{
+ struct name_snapshot filename;
char *pathname = NULL;
- /* We will allow 11 spaces for ' (deleted)' to be appended */
- *pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ *pathbuf = __getname();
if (*pathbuf) {
- pathname = d_path(path, *pathbuf, PATH_MAX + 11);
+ pathname = d_absolute_path(path, *pathbuf, PATH_MAX);
if (IS_ERR(pathname)) {
- kfree(*pathbuf);
+ __putname(*pathbuf);
*pathbuf = NULL;
pathname = NULL;
}
}
+
+ if (!pathname) {
+ take_dentry_name_snapshot(&filename, path->dentry);
+ strscpy(namebuf, filename.name.name, NAME_MAX);
+ release_dentry_name_snapshot(&filename);
+
+ pathname = namebuf;
+ }
+
return pathname;
}
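
[Editor's note] ima_audit_measurement() above now allocates a buffer sized for the configured hash algorithm and emits the record field as hash="<algo>:<hexdigest>" instead of a bare SHA1 hex string. Below is a minimal userspace sketch of that formatting; snprintf() stands in for the kernel's hex_byte_pack(), demo_format_hash() is a made-up name, and the digest bytes are illustrative only.

/* Minimal userspace sketch of the "<algo>:<hexdigest>" formatting that
 * ima_audit_measurement() above emits (hash="sha256:...").  snprintf()
 * stands in for the kernel's hex_byte_pack(); digest bytes are made up. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *demo_format_hash(const char *algo_name,
			      const unsigned char *digest, size_t len)
{
	/* algo + ':' + two hex chars per byte + NUL */
	size_t n = strlen(algo_name) + 1 + len * 2 + 1;
	char *out = malloc(n);
	size_t i, off;

	if (!out)
		return NULL;

	off = (size_t)snprintf(out, n, "%s:", algo_name);
	for (i = 0; i < len; i++)
		off += (size_t)snprintf(out + off, n - off, "%02x", digest[i]);
	return out;
}

int main(void)
{
	unsigned char digest[4] = { 0x0b, 0xad, 0xca, 0xfe };	/* sample only */
	char *s = demo_format_hash("sha256", digest, sizeof(digest));

	printf("hash=\"%s\"\n", s ? s : "(alloc failed)");
	free(s);
	return 0;
}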
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 2d4becab8918..5149ff4fd50d 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -1,262 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 IBM Corporation
*
* Author:
* Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
*/
#include <linux/module.h>
+#include <linux/init.h>
#include <linux/file.h>
+#include <linux/binfmts.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/magic.h>
#include <linux/ima.h>
#include <linux/evm.h>
+#include <linux/fsverity.h>
+#include <keys/system_keyring.h>
+#include <uapi/linux/fsverity.h>
#include "ima.h"
-static int __init default_appraise_setup(char *str)
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+static char *ima_appraise_cmdline_default __initdata;
+core_param(ima_appraise, ima_appraise_cmdline_default, charp, 0);
+
+void __init ima_appraise_parse_cmdline(void)
{
+ const char *str = ima_appraise_cmdline_default;
+ bool sb_state = arch_ima_get_secureboot();
+ int appraisal_state = ima_appraise;
+
+ if (!str)
+ return;
+
if (strncmp(str, "off", 3) == 0)
- ima_appraise = 0;
+ appraisal_state = 0;
+ else if (strncmp(str, "log", 3) == 0)
+ appraisal_state = IMA_APPRAISE_LOG;
else if (strncmp(str, "fix", 3) == 0)
- ima_appraise = IMA_APPRAISE_FIX;
- return 1;
+ appraisal_state = IMA_APPRAISE_FIX;
+ else if (strncmp(str, "enforce", 7) == 0)
+ appraisal_state = IMA_APPRAISE_ENFORCE;
+ else
+ pr_err("invalid \"%s\" appraise option", str);
+
+ /* If appraisal state was changed, but secure boot is enabled,
+ * keep its default */
+ if (sb_state) {
+ if (!(appraisal_state & IMA_APPRAISE_ENFORCE))
+ pr_info("Secure boot enabled: ignoring ima_appraise=%s option",
+ str);
+ } else {
+ ima_appraise = appraisal_state;
+ }
}
+#endif
-__setup("ima_appraise=", default_appraise_setup);
+/*
+ * is_ima_appraise_enabled - return appraise status
+ *
+ * Only return enabled, if not in ima_appraise="fix" or "log" modes.
+ */
+bool is_ima_appraise_enabled(void)
+{
+ return ima_appraise & IMA_APPRAISE_ENFORCE;
+}
/*
* ima_must_appraise - set appraise flag
*
- * Return 1 to appraise
+ * Return 1 to appraise or hash
*/
-int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
+int ima_must_appraise(struct mnt_idmap *idmap, struct inode *inode,
+ int mask, enum ima_hooks func)
{
+ struct lsm_prop prop;
+
if (!ima_appraise)
return 0;
- return ima_match_policy(inode, func, mask, IMA_APPRAISE);
+ security_current_getlsmprop_subj(&prop);
+ return ima_match_policy(idmap, inode, current_cred(), &prop,
+ func, mask, IMA_APPRAISE | IMA_HASH, NULL,
+ NULL, NULL, NULL);
}
-static int ima_fix_xattr(struct dentry *dentry,
- struct integrity_iint_cache *iint)
+static int ima_fix_xattr(struct dentry *dentry, struct ima_iint_cache *iint)
{
- iint->ima_xattr.type = IMA_XATTR_DIGEST;
- return __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
- (u8 *)&iint->ima_xattr,
- sizeof(iint->ima_xattr), 0);
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
}
/* Return specific func appraised cached result */
-enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
- int func)
+enum integrity_status ima_get_cache_status(struct ima_iint_cache *iint,
+ enum ima_hooks func)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
+ case MMAP_CHECK_REQPROT:
return iint->ima_mmap_status;
case BPRM_CHECK:
return iint->ima_bprm_status;
- case MODULE_CHECK:
- return iint->ima_module_status;
+ case CREDS_CHECK:
+ return iint->ima_creds_status;
case FILE_CHECK:
- default:
+ case POST_SETATTR:
return iint->ima_file_status;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ return iint->ima_read_status;
}
}
-static void ima_set_cache_status(struct integrity_iint_cache *iint,
- int func, enum integrity_status status)
+static void ima_set_cache_status(struct ima_iint_cache *iint,
+ enum ima_hooks func,
+ enum integrity_status status)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
+ case MMAP_CHECK_REQPROT:
iint->ima_mmap_status = status;
break;
case BPRM_CHECK:
iint->ima_bprm_status = status;
break;
- case MODULE_CHECK:
- iint->ima_module_status = status;
+ case CREDS_CHECK:
+ iint->ima_creds_status = status;
break;
case FILE_CHECK:
- default:
+ case POST_SETATTR:
iint->ima_file_status = status;
break;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ iint->ima_read_status = status;
+ break;
}
}
-static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
+static void ima_cache_flags(struct ima_iint_cache *iint, enum ima_hooks func)
{
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
+ case MMAP_CHECK_REQPROT:
iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
break;
case BPRM_CHECK:
iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED);
break;
- case MODULE_CHECK:
- iint->flags |= (IMA_MODULE_APPRAISED | IMA_APPRAISED);
+ case CREDS_CHECK:
+ iint->flags |= (IMA_CREDS_APPRAISED | IMA_APPRAISED);
break;
case FILE_CHECK:
- default:
+ case POST_SETATTR:
iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED);
break;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ iint->flags |= (IMA_READ_APPRAISED | IMA_APPRAISED);
+ break;
}
}
+enum hash_algo ima_get_hash_algo(const struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ struct signature_v2_hdr *sig;
+ enum hash_algo ret;
+
+ if (!xattr_value || xattr_len < 2)
+ /* return default hash algo */
+ return ima_hash_algo;
+
+ switch (xattr_value->type) {
+ case IMA_VERITY_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 3 || xattr_len <= sizeof(*sig) ||
+ sig->hash_algo >= HASH_ALGO__LAST)
+ return ima_hash_algo;
+ return sig->hash_algo;
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig)
+ || sig->hash_algo >= HASH_ALGO__LAST)
+ return ima_hash_algo;
+ return sig->hash_algo;
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ ret = xattr_value->data[0];
+ if (ret < HASH_ALGO__LAST)
+ return ret;
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->data[16], &zero, 4))
+ return HASH_ALGO_MD5;
+ else
+ return HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ return HASH_ALGO_MD5;
+ break;
+ }
+
+ /* return default hash algo */
+ return ima_hash_algo;
+}
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value, int xattr_len)
+{
+ int ret;
+
+ ret = vfs_getxattr_alloc(&nop_mnt_idmap, dentry, XATTR_NAME_IMA,
+ (char **)xattr_value, xattr_len, GFP_NOFS);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ return ret;
+}
+
/*
- * ima_appraise_measurement - appraise file measurement
+ * calc_file_id_hash - calculate the hash of the ima_file_id struct data
+ * @type: xattr type [enum evm_ima_xattr_type]
+ * @algo: hash algorithm [enum hash_algo]
+ * @digest: pointer to the digest to be hashed
+ * @hash: (out) pointer to the hash
*
- * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
- * Assuming success, compare the xattr hash with the collected measurement.
+ * IMA signature version 3 disambiguates the data that is signed by
+ * indirectly signing the hash of the ima_file_id structure data.
*
- * Return 0 on success, error code otherwise
+ * Signing the ima_file_id struct is currently only supported for
+ * IMA_VERITY_DIGSIG type xattrs.
+ *
+ * Return 0 on success, error code otherwise.
*/
-int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+static int calc_file_id_hash(enum evm_ima_xattr_type type,
+ enum hash_algo algo, const u8 *digest,
+ struct ima_digest_data *hash)
{
- struct dentry *dentry = file->f_dentry;
- struct inode *inode = dentry->d_inode;
- struct evm_ima_xattr_data *xattr_value = NULL;
- enum integrity_status status = INTEGRITY_UNKNOWN;
- const char *op = "appraise_data";
- char *cause = "unknown";
- int rc;
+ struct ima_file_id file_id = {
+ .hash_type = IMA_VERITY_DIGSIG, .hash_algorithm = algo};
+ unsigned int unused = HASH_MAX_DIGESTSIZE - hash_digest_size[algo];
- if (!ima_appraise)
- return 0;
- if (!inode->i_op->getxattr)
- return INTEGRITY_UNKNOWN;
+ if (type != IMA_VERITY_DIGSIG)
+ return -EINVAL;
- rc = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)&xattr_value,
- 0, GFP_NOFS);
- if (rc <= 0) {
- if (rc && rc != -ENODATA)
- goto out;
+ memcpy(file_id.hash, digest, hash_digest_size[algo]);
- cause = "missing-hash";
- status =
- (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
- goto out;
- }
+ hash->algo = algo;
+ hash->length = hash_digest_size[algo];
+
+ return ima_calc_buffer_hash(&file_id, sizeof(file_id) - unused, hash);
+}
+
+/*
+ * xattr_verify - verify xattr digest or signature
+ *
+ * Verify whether the hash or signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int xattr_verify(enum ima_hooks func, struct ima_iint_cache *iint,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ enum integrity_status *status, const char **cause)
+{
+ struct ima_max_digest_data hash;
+ struct signature_v2_hdr *sig;
+ int rc = -EINVAL, hash_start = 0;
+ int mask;
- status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
- if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
- if ((status == INTEGRITY_NOLABEL)
- || (status == INTEGRITY_NOXATTRS))
- cause = "missing-HMAC";
- else if (status == INTEGRITY_FAIL)
- cause = "invalid-HMAC";
- goto out;
- }
switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ fallthrough;
case IMA_XATTR_DIGEST:
- if (iint->flags & IMA_DIGSIG_REQUIRED) {
- cause = "IMA signature required";
- status = INTEGRITY_FAIL;
- break;
+ if (*status != INTEGRITY_PASS_IMMUTABLE) {
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ if (iint->flags & IMA_VERITY_REQUIRED)
+ *cause = "verity-signature-required";
+ else
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ } else {
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
}
- rc = memcmp(xattr_value->digest, iint->ima_xattr.digest,
- IMA_DIGEST_SIZE);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /*
+ * xattr length may be longer. md5 hash in previous
+ * version occupied 20 bytes in xattr, instead of 16
+ */
+ rc = memcmp(&xattr_value->data[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
if (rc) {
- cause = "invalid-hash";
- status = INTEGRITY_FAIL;
+ *cause = "invalid-hash";
+ *status = INTEGRITY_FAIL;
break;
}
- status = INTEGRITY_PASS;
+ *status = INTEGRITY_PASS;
break;
case EVM_IMA_XATTR_DIGSIG:
- iint->flags |= IMA_DIGSIG;
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+
+ mask = IMA_DIGSIG_REQUIRED | IMA_VERITY_REQUIRED;
+ if ((iint->flags & mask) == mask) {
+ *cause = "verity-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+
+ sig = (typeof(sig))xattr_value;
+ if (sig->version >= 3) {
+ *cause = "invalid-signature-version";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
- xattr_value->digest, rc - 1,
- iint->ima_xattr.digest,
- IMA_DIGEST_SIZE);
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
if (rc == -EOPNOTSUPP) {
- status = INTEGRITY_UNKNOWN;
- } else if (rc) {
- cause = "invalid-signature";
- status = INTEGRITY_FAIL;
+ *status = INTEGRITY_UNKNOWN;
+ break;
+ }
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ (const char *)xattr_value,
+ xattr_len,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
} else {
- status = INTEGRITY_PASS;
+ *status = INTEGRITY_PASS;
}
break;
+ case IMA_VERITY_DIGSIG:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ if (!(iint->flags & IMA_VERITY_REQUIRED)) {
+ *cause = "IMA-signature-required";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+ }
+
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 3) {
+ *cause = "invalid-signature-version";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+
+ rc = calc_file_id_hash(IMA_VERITY_DIGSIG, iint->ima_hash->algo,
+ iint->ima_hash->digest,
+ container_of(&hash.hdr,
+ struct ima_digest_data, hdr));
+ if (rc) {
+ *cause = "sigv3-hashing-error";
+ *status = INTEGRITY_FAIL;
+ break;
+ }
+
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value,
+ xattr_len, hash.digest,
+ hash.hdr.length);
+ if (rc) {
+ *cause = "invalid-verity-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+
+ break;
default:
- status = INTEGRITY_UNKNOWN;
- cause = "unknown-ima-data";
+ *status = INTEGRITY_UNKNOWN;
+ *cause = "unknown-ima-data";
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * modsig_verify - verify modsig signature
+ *
+ * Verify whether the signature matches the file contents.
+ *
+ * Return 0 on success, error code otherwise.
+ */
+static int modsig_verify(enum ima_hooks func, const struct modsig *modsig,
+ enum integrity_status *status, const char **cause)
+{
+ int rc;
+
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_IMA, modsig);
+ if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc &&
+ func == KEXEC_KERNEL_CHECK)
+ rc = integrity_modsig_verify(INTEGRITY_KEYRING_PLATFORM,
+ modsig);
+ if (rc) {
+ *cause = "invalid-signature";
+ *status = INTEGRITY_FAIL;
+ } else {
+ *status = INTEGRITY_PASS;
+ }
+
+ return rc;
+}
+
+/*
+ * ima_check_blacklist - determine if the binary is blacklisted.
+ *
+ * Add the hash of the blacklisted binary to the measurement list, based
+ * on policy.
+ *
+ * Returns -EPERM if the hash is blacklisted.
+ */
+int ima_check_blacklist(struct ima_iint_cache *iint,
+ const struct modsig *modsig, int pcr)
+{
+ enum hash_algo hash_algo;
+ const u8 *digest = NULL;
+ u32 digestsize = 0;
+ int rc = 0;
+
+ if (!(iint->flags & IMA_CHECK_BLACKLIST))
+ return 0;
+
+ if (iint->flags & IMA_MODSIG_ALLOWED && modsig) {
+ ima_get_modsig_digest(modsig, &hash_algo, &digest, &digestsize);
+
+ rc = is_binary_blacklisted(digest, digestsize);
+ } else if (iint->flags & IMA_DIGSIG_REQUIRED && iint->ima_hash)
+ rc = is_binary_blacklisted(iint->ima_hash->digest, iint->ima_hash->length);
+
+ if ((rc == -EPERM) && (iint->flags & IMA_MEASURE))
+ process_buffer_measurement(&nop_mnt_idmap, NULL, digest, digestsize,
+ "blacklisted-hash", NONE,
+ pcr, NULL, false, NULL, 0);
+
+ return rc;
+}
+
+static bool is_bprm_creds_for_exec(enum ima_hooks func, struct file *file)
+{
+ struct linux_binprm *bprm;
+
+ if (func == BPRM_CHECK) {
+ bprm = container_of(&file, struct linux_binprm, file);
+ return bprm->is_check;
+ }
+ return false;
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, const struct modsig *modsig)
+{
+ static const char op[] = "appraise_data";
+ int audit_msgno = AUDIT_INTEGRITY_DATA;
+ const char *cause = "unknown";
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = d_backing_inode(dentry);
+ enum integrity_status status = INTEGRITY_UNKNOWN;
+ int rc = xattr_len;
+ bool try_modsig = iint->flags & IMA_MODSIG_ALLOWED && modsig;
+
+ /* If not appraising a modsig, we need an xattr. */
+ if (!(inode->i_opflags & IOP_XATTR) && !try_modsig)
+ return INTEGRITY_UNKNOWN;
+
+ /*
+ * Unlike any of the other LSM hooks where the kernel enforces file
+ * integrity, enforcing file integrity for the bprm_creds_for_exec()
+ * LSM hook with the AT_EXECVE_CHECK flag is left up to the discretion
+ * of the script interpreter(userspace). Differentiate kernel and
+ * userspace enforced integrity audit messages.
+ */
+ if (is_bprm_creds_for_exec(func, file))
+ audit_msgno = AUDIT_INTEGRITY_USERSPACE;
+
+ /* If reading the xattr failed and there's no modsig, error out. */
+ if (rc <= 0 && !try_modsig) {
+ if (rc && rc != -ENODATA)
+ goto out;
+
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ if (iint->flags & IMA_VERITY_REQUIRED)
+ cause = "verity-signature-required";
+ else
+ cause = "IMA-signature-required";
+ } else {
+ cause = "missing-hash";
+ }
+
+ status = INTEGRITY_NOLABEL;
+ if (file->f_mode & FMODE_CREATED)
+ iint->flags |= IMA_NEW_FILE;
+ if ((iint->flags & IMA_NEW_FILE) &&
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
+ status = INTEGRITY_PASS;
+ goto out;
+ }
+
+ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value,
+ rc < 0 ? 0 : rc);
+ switch (status) {
+ case INTEGRITY_PASS:
+ case INTEGRITY_PASS_IMMUTABLE:
+ case INTEGRITY_UNKNOWN:
break;
+ case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */
+ /* It's fine not to have xattrs when using a modsig. */
+ if (try_modsig)
+ break;
+ fallthrough;
+ case INTEGRITY_NOLABEL: /* No security.evm xattr. */
+ cause = "missing-HMAC";
+ goto out;
+ case INTEGRITY_FAIL_IMMUTABLE:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ cause = "invalid-fail-immutable";
+ goto out;
+ case INTEGRITY_FAIL: /* Invalid HMAC/signature. */
+ cause = "invalid-HMAC";
+ goto out;
+ default:
+ WARN_ONCE(true, "Unexpected integrity status %d\n", status);
}
+ if (xattr_value)
+ rc = xattr_verify(func, iint, xattr_value, xattr_len, &status,
+ &cause);
+
+ /*
+ * If we have a modsig and either no imasig or the imasig's key isn't
+ * known, then try verifying the modsig.
+ */
+ if (try_modsig &&
+ (!xattr_value || xattr_value->type == IMA_XATTR_DIGEST_NG ||
+ rc == -ENOKEY))
+ rc = modsig_verify(func, modsig, &status, &cause);
+
out:
- if (status != INTEGRITY_PASS) {
- if ((ima_appraise & IMA_APPRAISE_FIX) &&
+ /*
+ * File signatures on some filesystems can not be properly verified.
+ * When such filesystems are mounted by an untrusted mounter or on a
+ * system not willing to accept such a risk, fail the file signature
+ * verification.
+ */
+ if ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) &&
+ ((inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) ||
+ (iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) {
+ status = INTEGRITY_FAIL;
+ cause = "unverifiable-signature";
+ integrity_audit_msg(audit_msgno, inode, filename,
+ op, cause, rc, 0);
+ } else if (status != INTEGRITY_PASS) {
+ /* Fix mode, but don't replace file signatures. */
+ if ((ima_appraise & IMA_APPRAISE_FIX) && !try_modsig &&
(!xattr_value ||
xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
if (!ima_fix_xattr(dentry, iint))
status = INTEGRITY_PASS;
}
- integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+
+ /*
+ * Permit new files with file/EVM portable signatures, but
+ * without data.
+ */
+ if (inode->i_size == 0 && iint->flags & IMA_NEW_FILE &&
+ test_bit(IMA_DIGSIG, &iint->atomic_flags)) {
+ status = INTEGRITY_PASS;
+ }
+
+ integrity_audit_msg(audit_msgno, inode, filename,
op, cause, rc, 0);
} else {
ima_cache_flags(iint, func);
}
+
ima_set_cache_status(iint, func, status);
- kfree(xattr_value);
return status;
}
/*
* ima_update_xattr - update 'security.ima' hash value
*/
-void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+void ima_update_xattr(struct ima_iint_cache *iint, struct file *file)
{
- struct dentry *dentry = file->f_dentry;
+ struct dentry *dentry = file_dentry(file);
int rc = 0;
/* do not collect and update hash for digital signatures */
- if (iint->flags & IMA_DIGSIG)
+ if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
return;
- rc = ima_collect_measurement(iint, file);
+ if ((iint->ima_file_status != INTEGRITY_PASS) &&
+ !(iint->flags & IMA_HASH))
+ return;
+
+ rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo, NULL);
if (rc < 0)
return;
+ inode_lock(file_inode(file));
ima_fix_xattr(dentry, iint);
+ inode_unlock(file_inode(file));
}
/**
* ima_inode_post_setattr - reflect file metadata changes
+ * @idmap: idmap of the mount the inode was found from
* @dentry: pointer to the affected dentry
+ * @ia_valid: for the UID and GID status
*
* Changes to a dentry's metadata might result in needing to appraise.
*
* This function is called from notify_change(), which expects the caller
* to lock the inode's i_mutex.
*/
-void ima_inode_post_setattr(struct dentry *dentry)
+static void ima_inode_post_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, int ia_valid)
{
- struct inode *inode = dentry->d_inode;
- struct integrity_iint_cache *iint;
- int must_appraise, rc;
+ struct inode *inode = d_backing_inode(dentry);
+ struct ima_iint_cache *iint;
+ int action;
- if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode)
- || !inode->i_op->removexattr)
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
+ || !(inode->i_opflags & IOP_XATTR))
return;
- must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
- iint = integrity_iint_find(inode);
+ action = ima_must_appraise(idmap, inode, MAY_ACCESS, POST_SETATTR);
+ iint = ima_iint_find(inode);
if (iint) {
- iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
- IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
- IMA_ACTION_FLAGS);
- if (must_appraise)
- iint->flags |= IMA_APPRAISE;
+ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
+ if (!action)
+ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
}
- if (!must_appraise)
- rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA);
- return;
}
/*
@@ -275,43 +694,167 @@ static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
return 0;
}
-static void ima_reset_appraise_flags(struct inode *inode)
+/*
+ * ima_reset_appraise_flags - reset ima_iint_cache flags
+ *
+ * @digsig: whether to clear/set IMA_DIGSIG flag, tristate values
+ * 0: clear IMA_DIGSIG
+ * 1: set IMA_DIGSIG
+ * -1: don't change IMA_DIGSIG
+ *
+ */
+static void ima_reset_appraise_flags(struct inode *inode, int digsig)
{
- struct integrity_iint_cache *iint;
+ struct ima_iint_cache *iint;
- if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode))
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode))
return;
- iint = integrity_iint_find(inode);
+ iint = ima_iint_find(inode);
if (!iint)
return;
+ iint->measured_pcrs = 0;
+ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
+ if (digsig == 1)
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ else if (digsig == 0)
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+}
+
+/**
+ * validate_hash_algo() - Block setxattr with unsupported hash algorithms
+ * @dentry: object of the setxattr()
+ * @xattr_value: userland supplied xattr value
+ * @xattr_value_len: length of xattr_value
+ *
+ * The xattr value is mapped to its hash algorithm, and this algorithm
+ * must be built in the kernel for the setxattr to be allowed.
+ *
+ * Emit an audit message when the algorithm is invalid.
+ *
+ * Return: 0 on success, else an error.
+ */
+static int validate_hash_algo(struct dentry *dentry,
+ const struct evm_ima_xattr_data *xattr_value,
+ size_t xattr_value_len)
+{
+ char *path = NULL, *pathbuf = NULL;
+ enum hash_algo xattr_hash_algo;
+ const char *errmsg = "unavailable-hash-algorithm";
+ unsigned int allowed_hashes;
+
+ xattr_hash_algo = ima_get_hash_algo(xattr_value, xattr_value_len);
+
+ allowed_hashes = atomic_read(&ima_setxattr_allowed_hash_algorithms);
+
+ if (allowed_hashes) {
+ /* success if the algorithm is allowed in the ima policy */
+ if (allowed_hashes & (1U << xattr_hash_algo))
+ return 0;
- iint->flags &= ~IMA_DONE_MASK;
- return;
+ /*
+ * We use a different audit message when the hash algorithm
+ * is denied by a policy rule, instead of not being built
+ * in the kernel image
+ */
+ errmsg = "denied-hash-algorithm";
+ } else {
+ if (likely(xattr_hash_algo == ima_hash_algo))
+ return 0;
+
+ /* allow any xattr using an algorithm built in the kernel */
+ if (crypto_has_alg(hash_algo_name[xattr_hash_algo], 0, 0))
+ return 0;
+ }
+
+ pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!pathbuf)
+ return -EACCES;
+
+ path = dentry_path(dentry, pathbuf, PATH_MAX);
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, d_inode(dentry), path,
+ "set_data", errmsg, -EACCES, 0);
+
+ kfree(pathbuf);
+
+ return -EACCES;
}
-int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
- const void *xattr_value, size_t xattr_value_len)
+static int ima_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *xattr_name, const void *xattr_value,
+ size_t xattr_value_len, int flags)
{
+ const struct evm_ima_xattr_data *xvalue = xattr_value;
+ int digsig = 0;
int result;
+ int err;
result = ima_protect_xattr(dentry, xattr_name, xattr_value,
xattr_value_len);
if (result == 1) {
- ima_reset_appraise_flags(dentry->d_inode);
- result = 0;
+ if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
+ return -EINVAL;
+
+ err = validate_hash_algo(dentry, xvalue, xattr_value_len);
+ if (err)
+ return err;
+
+ digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
+ } else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) {
+ digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG);
+ } else {
+ digsig = -1;
+ }
+ if (result == 1 || evm_revalidate_status(xattr_name)) {
+ ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+ if (result == 1)
+ result = 0;
}
return result;
}
-int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+static int ima_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl)
{
- int result;
+ if (evm_revalidate_status(acl_name))
+ ima_reset_appraise_flags(d_backing_inode(dentry), -1);
+
+ return 0;
+}
+
+static int ima_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *xattr_name)
+{
+ int result, digsig = -1;
result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
- if (result == 1) {
- ima_reset_appraise_flags(dentry->d_inode);
- result = 0;
+ if (result == 1 || evm_revalidate_status(xattr_name)) {
+ if (!strcmp(xattr_name, XATTR_NAME_IMA))
+ digsig = 0;
+ ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+ if (result == 1)
+ result = 0;
}
return result;
}
+
+static int ima_inode_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name)
+{
+ return ima_inode_set_acl(idmap, dentry, acl_name, NULL);
+}
+
+static struct security_hook_list ima_appraise_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(inode_post_setattr, ima_inode_post_setattr),
+ LSM_HOOK_INIT(inode_setxattr, ima_inode_setxattr),
+ LSM_HOOK_INIT(inode_set_acl, ima_inode_set_acl),
+ LSM_HOOK_INIT(inode_removexattr, ima_inode_removexattr),
+ LSM_HOOK_INIT(inode_remove_acl, ima_inode_remove_acl),
+};
+
+void __init init_ima_appraise_lsm(const struct lsm_id *lsmid)
+{
+ security_add_hooks(ima_appraise_hooks, ARRAY_SIZE(ima_appraise_hooks),
+ lsmid);
+}
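[Editor's note, not part of the patch] For orientation, ima_fix_xattr() above writes the non-SHA1 'security.ima' value as a type byte (IMA_XATTR_DIGEST_NG), an algorithm byte and the raw digest, and ima_inode_setxattr()/validate_hash_algo() police the same layout on the way in. A minimal userspace sketch of producing such a value follows; it is illustrative only, and the numeric constants assume IMA_XATTR_DIGEST_NG == 4 and HASH_ALGO_SHA256 == 4 as defined in the kernel headers at the time of writing.

#include <string.h>
#include <sys/xattr.h>

/* Illustrative sketch only: label a file with a sha256 IMA_XATTR_DIGEST_NG
 * value, mirroring what ima_fix_xattr() writes for non-SHA1 algorithms.
 * Assumes IMA_XATTR_DIGEST_NG == 4 and HASH_ALGO_SHA256 == 4. */
static int write_ima_digest_ng(const char *path, const unsigned char *sha256)
{
        unsigned char value[2 + 32];

        value[0] = 4;                   /* IMA_XATTR_DIGEST_NG type byte */
        value[1] = 4;                   /* HASH_ALGO_SHA256 algorithm byte */
        memcpy(&value[2], sha256, 32);  /* raw file digest */

        /* Needs CAP_SYS_ADMIN; the kernel re-validates the algorithm in
         * ima_inode_setxattr() -> validate_hash_algo() before accepting it. */
        return setxattr(path, "security.ima", value, sizeof(value), 0);
}

This is roughly what 'evmctl ima_hash' produces; with ima_appraise=fix the kernel generates the same value itself through ima_fix_xattr().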
diff --git a/security/integrity/ima/ima_asymmetric_keys.c b/security/integrity/ima/ima_asymmetric_keys.c
new file mode 100644
index 000000000000..caacfe6860b1
--- /dev/null
+++ b/security/integrity/ima/ima_asymmetric_keys.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * File: ima_asymmetric_keys.c
+ * Defines an IMA hook to measure asymmetric keys on key
+ * create or update.
+ */
+
+#include <keys/asymmetric-type.h>
+#include <linux/user_namespace.h>
+#include <linux/ima.h>
+#include "ima.h"
+
+/**
+ * ima_post_key_create_or_update - measure asymmetric keys
+ * @keyring: keyring to which the key is linked to
+ * @key: created or updated key
+ * @payload: The data used to instantiate or update the key.
+ * @payload_len: The length of @payload.
+ * @flags: key flags
+ * @create: flag indicating whether the key was created or updated
+ *
+ * Keys can only be measured, not appraised.
+ * The payload data used to instantiate or update the key is measured.
+ */
+void ima_post_key_create_or_update(struct key *keyring, struct key *key,
+ const void *payload, size_t payload_len,
+ unsigned long flags, bool create)
+{
+ bool queued = false;
+
+ /* Only asymmetric keys are handled by this hook. */
+ if (key->type != &key_type_asymmetric)
+ return;
+
+ if (!payload || (payload_len == 0))
+ return;
+
+ if (ima_should_queue_key())
+ queued = ima_queue_key(keyring, payload, payload_len);
+
+ if (queued)
+ return;
+
+ /*
+ * keyring->description points to the name of the keyring
+ * (such as ".builtin_trusted_keys", ".ima", etc.) to
+ * which the given key is linked to.
+ *
+ * The name of the keyring is passed in the "eventname"
+ * parameter to process_buffer_measurement() and is set
+ * in the "eventname" field in ima_event_data for
+ * the key measurement IMA event.
+ *
+ * The name of the keyring is also passed in the "keyring"
+ * parameter to process_buffer_measurement() to check
+ * if the IMA policy is configured to measure a key linked
+ * to the given keyring.
+ */
+ process_buffer_measurement(&nop_mnt_idmap, NULL, payload, payload_len,
+ keyring->description, KEY_CHECK, 0,
+ keyring->description, false, NULL, 0);
+}
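[Editor's note, not part of the patch] Whether the hook above actually records anything depends on the loaded IMA policy containing a KEY_CHECK rule for the keyring in question. A hypothetical fragment, written in the same style as the sb_arch_rules table in ima_efi.c further down (the rule strings follow the documented ima_policy syntax; the array name is purely illustrative):

/* Illustrative only: measure keys linked to the .ima and
 * .builtin_trusted_keys keyrings; payloads land in the default PCR. */
static const char * const key_measure_rules[] = {
        "measure func=KEY_CHECK keyrings=.ima|.builtin_trusted_keys",
        NULL
};

Dropping the keyrings= option would measure keys added to any keyring, which is usually far too noisy.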
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index a02e0791cf15..6f5696d999d0 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
@@ -5,144 +6,872 @@
* Mimi Zohar <zohar@us.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
* File: ima_crypto.c
- * Calculates md5/sha1 file hash, template hash, boot-aggreate hash
+ * Calculates md5/sha1 file hash, template hash, boot-aggregate hash
*/
#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>
+
#include "ima.h"
+/* minimum file size for ahash use */
+static unsigned long ima_ahash_minsize;
+module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
+MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
+
+/* default is 0 - 1 page. */
+static int ima_maxorder;
+static unsigned int ima_bufsize = PAGE_SIZE;
+
+static int param_set_bufsize(const char *val, const struct kernel_param *kp)
+{
+ unsigned long long size;
+ int order;
+
+ size = memparse(val, NULL);
+ order = get_order(size);
+ if (order > MAX_PAGE_ORDER)
+ return -EINVAL;
+ ima_maxorder = order;
+ ima_bufsize = PAGE_SIZE << order;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_bufsize = {
+ .set = param_set_bufsize,
+ .get = param_get_uint,
+};
+#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)
+
+module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
+MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
+
static struct crypto_shash *ima_shash_tfm;
+static struct crypto_ahash *ima_ahash_tfm;
+
+int ima_sha1_idx __ro_after_init;
+int ima_hash_algo_idx __ro_after_init;
+/*
+ * Additional number of slots reserved, as needed, for SHA1
+ * and IMA default algo.
+ */
+int ima_extra_slots __ro_after_init;
-int ima_init_crypto(void)
+struct ima_algo_desc *ima_algo_array __ro_after_init;
+
+static int __init ima_init_ima_crypto(void)
{
long rc;
- ima_shash_tfm = crypto_alloc_shash(ima_hash, 0, 0);
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
if (IS_ERR(ima_shash_tfm)) {
rc = PTR_ERR(ima_shash_tfm);
- pr_err("Can not allocate %s (reason: %ld)\n", ima_hash, rc);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
return rc;
}
+ pr_info("Allocated hash algorithm: %s\n",
+ hash_algo_name[ima_hash_algo]);
return 0;
}
-/*
- * Calculate the MD5/SHA1 file digest
- */
-int ima_calc_file_hash(struct file *file, char *digest)
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
- loff_t i_size, offset = 0;
- char *rbuf;
- int rc, read = 0;
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
- } desc;
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc, i;
- desc.shash.tfm = ima_shash_tfm;
- desc.shash.flags = 0;
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
- rc = crypto_shash_init(&desc.shash);
- if (rc != 0)
+ if (algo == ima_hash_algo)
+ return tfm;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
+ return ima_algo_array[i].tfm;
+
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ return tfm;
+}
+
+int __init ima_init_crypto(void)
+{
+ enum hash_algo algo;
+ long rc;
+ int i;
+
+ rc = ima_init_ima_crypto();
+ if (rc)
return rc;
- rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!rbuf) {
+ ima_sha1_idx = -1;
+ ima_hash_algo_idx = -1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (algo == HASH_ALGO_SHA1)
+ ima_sha1_idx = i;
+
+ if (algo == ima_hash_algo)
+ ima_hash_algo_idx = i;
+ }
+
+ if (ima_sha1_idx < 0) {
+ ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+ if (ima_hash_algo == HASH_ALGO_SHA1)
+ ima_hash_algo_idx = ima_sha1_idx;
+ }
+
+ if (ima_hash_algo_idx < 0)
+ ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+
+ ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*ima_algo_array), GFP_KERNEL);
+ if (!ima_algo_array) {
rc = -ENOMEM;
goto out;
}
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ ima_algo_array[i].algo = algo;
+
+ /* unknown TPM algorithm */
+ if (algo == HASH_ALGO__LAST)
+ continue;
+
+ if (algo == ima_hash_algo) {
+ ima_algo_array[i].tfm = ima_shash_tfm;
+ continue;
+ }
+
+ ima_algo_array[i].tfm = ima_alloc_tfm(algo);
+ if (IS_ERR(ima_algo_array[i].tfm)) {
+ if (algo == HASH_ALGO_SHA1) {
+ rc = PTR_ERR(ima_algo_array[i].tfm);
+ ima_algo_array[i].tfm = NULL;
+ goto out_array;
+ }
+
+ ima_algo_array[i].tfm = NULL;
+ }
}
+
+ if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
+ if (ima_hash_algo == HASH_ALGO_SHA1) {
+ ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
+ } else {
+ ima_algo_array[ima_sha1_idx].tfm =
+ ima_alloc_tfm(HASH_ALGO_SHA1);
+ if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
+ rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
+ goto out_array;
+ }
+ }
+
+ ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
+ }
+
+ if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
+ ima_hash_algo_idx != ima_sha1_idx) {
+ ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
+ ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
+ }
+
+ return 0;
+out_array:
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (!ima_algo_array[i].tfm ||
+ ima_algo_array[i].tfm == ima_shash_tfm)
+ continue;
+
+ crypto_free_shash(ima_algo_array[i].tfm);
+ }
+ kfree(ima_algo_array);
+out:
+ crypto_free_shash(ima_shash_tfm);
+ return rc;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ int i;
+
+ if (tfm == ima_shash_tfm)
+ return;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm == tfm)
+ return;
+
+ crypto_free_shash(tfm);
+}
+
+/**
+ * ima_alloc_pages() - Allocate contiguous pages.
+ * @max_size: Maximum amount of memory to allocate.
+ * @allocated_size: Returned size of actual allocation.
+ * @last_warn: Should the min_size allocation warn or not.
+ *
+ * Tries to do opportunistic allocation for memory first trying to allocate
+ * max_size amount of memory and then splitting that until zero order is
+ * reached. Allocation is tried without generating allocation warnings unless
+ * last_warn is set. Last_warn set affects only last allocation of zero order.
+ *
+ * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
+ *
+ * Return pointer to allocated memory, or NULL on failure.
+ */
+static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
+ int last_warn)
+{
+ void *ptr;
+ int order = ima_maxorder;
+ gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
+
+ if (order)
+ order = min(get_order(max_size), order);
+
+ for (; order; order--) {
+ ptr = (void *)__get_free_pages(gfp_mask, order);
+ if (ptr) {
+ *allocated_size = PAGE_SIZE << order;
+ return ptr;
+ }
+ }
+
+ /* order is zero - one page */
+
+ gfp_mask = GFP_KERNEL;
+
+ if (!last_warn)
+ gfp_mask |= __GFP_NOWARN;
+
+ ptr = (void *)__get_free_pages(gfp_mask, 0);
+ if (ptr) {
+ *allocated_size = PAGE_SIZE;
+ return ptr;
+ }
+
+ *allocated_size = 0;
+ return NULL;
+}
+
+/**
+ * ima_free_pages() - Free pages allocated by ima_alloc_pages().
+ * @ptr: Pointer to allocated pages.
+ * @size: Size of allocated buffer.
+ */
+static void ima_free_pages(void *ptr, size_t size)
+{
+ if (!ptr)
+ return;
+ free_pages((unsigned long)ptr, get_order(size));
+}
+
+static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
+{
+ struct crypto_ahash *tfm = ima_ahash_tfm;
+ int rc;
+
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo != ima_hash_algo || !tfm) {
+ tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
+ if (!IS_ERR(tfm)) {
+ if (algo == ima_hash_algo)
+ ima_ahash_tfm = tfm;
+ } else {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_atfm(struct crypto_ahash *tfm)
+{
+ if (tfm != ima_ahash_tfm)
+ crypto_free_ahash(tfm);
+}
+
+static inline int ahash_wait(int err, struct crypto_wait *wait)
+{
+
+ err = crypto_wait_req(err, wait);
+
+ if (err)
+ pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
+
+ return err;
+}
+
+static int ima_calc_file_hash_atfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_ahash *tfm)
+{
+ loff_t i_size, offset;
+ char *rbuf[2] = { NULL, };
+ int rc, rbuf_len, active = 0, ahash_rc = 0;
+ struct ahash_request *req;
+ struct scatterlist sg[1];
+ struct crypto_wait wait;
+ size_t rbuf_size[2];
+
+ hash->length = crypto_ahash_digestsize(tfm);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
+ if (rc)
+ goto out1;
+
i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
+ goto out2;
+
+ /*
+ * Try to allocate maximum size of memory.
+ * Fail if even a single page cannot be allocated.
+ */
+ rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
+ if (!rbuf[0]) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+
+ /* Only allocate one buffer if that is enough. */
+ if (i_size > rbuf_size[0]) {
+ /*
+ * Try to allocate secondary buffer. If that fails fallback to
+ * using single buffering. Use previous memory allocation size
+ * as baseline for possible allocation size.
+ */
+ rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
+ &rbuf_size[1], 0);
+ }
+
+ for (offset = 0; offset < i_size; offset += rbuf_len) {
+ if (!rbuf[1] && offset) {
+ /* Not using two buffers, and it is not the first
+ * read/request, wait for the completion of the
+ * previous ahash_update() request.
+ */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (rc)
+ goto out3;
+ }
+ /* read buffer */
+ rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
+ rc = integrity_kernel_read(file, offset, rbuf[active],
+ rbuf_len);
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
+ /*
+ * Forward current rc, do not overwrite with return value
+ * from ahash_wait()
+ */
+ ahash_wait(ahash_rc, &wait);
+ goto out3;
+ }
+
+ if (rbuf[1] && offset) {
+ /* Using two buffers, and it is not the first
+ * read/request, wait for the completion of the
+ * previous ahash_update() request.
+ */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (rc)
+ goto out3;
+ }
+
+ sg_init_one(&sg[0], rbuf[active], rbuf_len);
+ ahash_request_set_crypt(req, sg, NULL, rbuf_len);
+
+ ahash_rc = crypto_ahash_update(req);
+
+ if (rbuf[1])
+ active = !active; /* swap buffers, if we use two */
+ }
+ /* wait for the last update request to complete */
+ rc = ahash_wait(ahash_rc, &wait);
+out3:
+ ima_free_pages(rbuf[0], rbuf_size[0]);
+ ima_free_pages(rbuf[1], rbuf_size[1]);
+out2:
+ if (!rc) {
+ ahash_request_set_crypt(req, NULL, hash->digest, 0);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
+ }
+out1:
+ ahash_request_free(req);
+ return rc;
+}
+
+static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_ahash *tfm;
+ int rc;
+
+ tfm = ima_alloc_atfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_atfm(file, hash, tfm);
+
+ ima_free_atfm(tfm);
+
+ return rc;
+}
+
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ loff_t i_size, offset = 0;
+ char *rbuf;
+ int rc;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
+ goto out;
+
+ rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
while (offset < i_size) {
int rbuf_len;
- rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
+ rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
if (rbuf_len < 0) {
rc = rbuf_len;
break;
}
- if (rbuf_len == 0)
+ if (rbuf_len == 0) { /* unexpected EOF */
+ rc = -EINVAL;
break;
+ }
offset += rbuf_len;
- rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
+ rc = crypto_shash_update(shash, rbuf, rbuf_len);
if (rc)
break;
}
kfree(rbuf);
+out:
if (!rc)
- rc = crypto_shash_final(&desc.shash, digest);
- if (read)
- file->f_mode &= ~FMODE_READ;
+ rc = crypto_shash_final(shash, hash->digest);
+ return rc;
+}
+
+static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
+
+/*
+ * ima_calc_file_hash - calculate file hash
+ *
+ * Asynchronous hash (ahash) allows using HW acceleration for calculating
+ * a hash. ahash performance varies for different data sizes on different
+ * crypto accelerators. shash performance might be better for smaller files.
+ * The 'ima.ahash_minsize' module parameter allows specifying the best
+ * minimum file size for using ahash on the system.
+ *
+ * If the ima.ahash_minsize parameter is not specified, this function uses
+ * shash for the hash calculation. If ahash fails, it falls back to using
+ * shash.
+ */
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ loff_t i_size;
+ int rc;
+ struct file *f = file;
+ bool new_file_instance = false;
+
+ /*
+ * For consistency, fail file's opened with the O_DIRECT flag on
+ * filesystems mounted with/without DAX option.
+ */
+ if (file->f_flags & O_DIRECT) {
+ hash->length = hash_digest_size[ima_hash_algo];
+ hash->algo = ima_hash_algo;
+ return -EINVAL;
+ }
+
+ /* Open a new file instance in O_RDONLY if we cannot read */
+ if (!(file->f_mode & FMODE_READ)) {
+ int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
+ O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
+ flags |= O_RDONLY;
+ f = dentry_open(&file->f_path, flags, file->f_cred);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ new_file_instance = true;
+ }
+
+ i_size = i_size_read(file_inode(f));
+
+ if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
+ rc = ima_calc_file_ahash(f, hash);
+ if (!rc)
+ goto out;
+ }
+
+ rc = ima_calc_file_shash(f, hash);
out:
+ if (new_file_instance)
+ fput(f);
return rc;
}
/*
- * Calculate the hash of a given buffer
+ * Calculate the hash of template data
*/
-int ima_calc_buffer_hash(const void *data, int len, char *digest)
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ struct ima_template_entry *entry,
+ int tfm_idx)
+{
+ SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
+ struct ima_template_desc *td = entry->template_desc;
+ int num_fields = entry->template_desc->num_fields;
+ int rc, i;
+
+ shash->tfm = ima_algo_array[tfm_idx].tfm;
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ for (i = 0; i < num_fields; i++) {
+ u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
+ u8 *data_to_hash = field_data[i].data;
+ u32 datalen = field_data[i].len;
+ u32 datalen_to_hash = !ima_canonical_fmt ?
+ datalen : (__force u32)cpu_to_le32(datalen);
+
+ if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ rc = crypto_shash_update(shash,
+ (const u8 *) &datalen_to_hash,
+ sizeof(datalen_to_hash));
+ if (rc)
+ break;
+ } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
+ memcpy(buffer, data_to_hash, datalen);
+ data_to_hash = buffer;
+ datalen = IMA_EVENT_NAME_LEN_MAX + 1;
+ }
+ rc = crypto_shash_update(shash, data_to_hash, datalen);
+ if (rc)
+ break;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_entry *entry)
+{
+ u16 alg_id;
+ int rc, i;
+
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
+ if (rc)
+ return rc;
+
+ entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (i == ima_sha1_idx)
+ continue;
+
+ if (i < NR_BANKS(ima_tpm_chip)) {
+ alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ entry->digests[i].alg_id = alg_id;
+ }
+
+ /* for unmapped TPM algorithms digest is still a padded SHA1 */
+ if (!ima_algo_array[i].tfm) {
+ memcpy(entry->digests[i].digest,
+ entry->digests[ima_sha1_idx].digest,
+ TPM_DIGEST_SIZE);
+ continue;
+ }
+
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
+ if (rc)
+ return rc;
+ }
+ return rc;
+}
+
+static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
+ struct ima_digest_data *hash,
+ struct crypto_ahash *tfm)
+{
+ struct ahash_request *req;
+ struct scatterlist sg;
+ struct crypto_wait wait;
+ int rc, ahash_rc = 0;
+
+ hash->length = crypto_ahash_digestsize(tfm);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
+ if (rc)
+ goto out;
+
+ sg_init_one(&sg, buf, len);
+ ahash_request_set_crypt(req, &sg, NULL, len);
+
+ ahash_rc = crypto_ahash_update(req);
+
+ /* wait for the update request to complete */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (!rc) {
+ ahash_request_set_crypt(req, NULL, hash->digest, 0);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
+ }
+out:
+ ahash_request_free(req);
+ return rc;
+}
+
+static int calc_buffer_ahash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ struct crypto_ahash *tfm;
+ int rc;
+
+ tfm = ima_alloc_atfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);
+
+ ima_free_atfm(tfm);
+
+ return rc;
+}
+
+static int calc_buffer_shash_tfm(const void *buf, loff_t size,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+ unsigned int len;
+ int rc;
+
+ shash->tfm = tfm;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ while (size) {
+ len = size < PAGE_SIZE ? size : PAGE_SIZE;
+ rc = crypto_shash_update(shash, buf, len);
+ if (rc)
+ break;
+ buf += len;
+ size -= len;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(shash, hash->digest);
+ return rc;
+}
+
+static int calc_buffer_shash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = calc_buffer_shash_tfm(buf, len, hash, tfm);
+
+ ima_free_tfm(tfm);
+ return rc;
+}
+
+int ima_calc_buffer_hash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
{
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
- } desc;
+ int rc;
- desc.shash.tfm = ima_shash_tfm;
- desc.shash.flags = 0;
+ if (ima_ahash_minsize && len >= ima_ahash_minsize) {
+ rc = calc_buffer_ahash(buf, len, hash);
+ if (!rc)
+ return 0;
+ }
- return crypto_shash_digest(&desc.shash, data, len, digest);
+ return calc_buffer_shash(buf, len, hash);
}
-static void __init ima_pcrread(int idx, u8 *pcr)
+static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
- if (!ima_used_chip)
+ if (!ima_tpm_chip)
return;
- if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
- pr_err("IMA: Error Communicating to TPM chip\n");
+ if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
+ pr_err("Error Communicating to TPM chip\n");
}
/*
- * Calculate the boot aggregate hash
+ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
+ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
+ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
+ * allowing firmware to configure and enable different banks.
+ *
+ * Knowing which TPM bank is read to calculate the boot_aggregate digest
+ * needs to be conveyed to a verifier. For this reason, use the same
+ * hash algorithm for reading the TPM PCRs as for calculating the boot
+ * aggregate digest as stored in the measurement list.
*/
-int __init ima_calc_boot_aggregate(char *digest)
+static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ struct crypto_shash *tfm)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
- int rc, i;
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(ima_shash_tfm)];
- } desc;
+ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
+ int rc;
+ u32 i;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
- desc.shash.tfm = ima_shash_tfm;
- desc.shash.flags = 0;
+ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
+ d.alg_id);
- rc = crypto_shash_init(&desc.shash);
+ rc = crypto_shash_init(shash);
if (rc != 0)
return rc;
- /* cumulative sha1 over tpm registers 0-7 */
+ /* cumulative digest over TPM registers 0-7 */
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
- ima_pcrread(i, pcr_i);
+ ima_pcrread(i, &d);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(&desc.shash, pcr_i, IMA_DIGEST_SIZE);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ if (rc != 0)
+ return rc;
+ }
+ /*
+ * Extend cumulative digest over TPM registers 8-9, which contain
+ * measurement for the kernel command line (reg. 8) and image (reg. 9)
+ * in a typical PCR allocation. Registers 8-9 are only included in
+ * non-SHA1 boot_aggregate digests to avoid ambiguity.
+ */
+ if (alg_id != TPM_ALG_SHA1) {
+ for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+ ima_pcrread(i, &d);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ }
}
if (!rc)
- crypto_shash_final(&desc.shash, digest);
+ crypto_shash_final(shash, digest);
+ return rc;
+}
+
+int ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ u16 crypto_id, alg_id;
+ int rc, i, bank_idx = -1;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (crypto_id == hash->algo) {
+ bank_idx = i;
+ break;
+ }
+
+ if (crypto_id == HASH_ALGO_SHA256)
+ bank_idx = i;
+
+ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
+ bank_idx = i;
+ }
+
+ if (bank_idx == -1) {
+ pr_err("No suitable TPM algorithm for boot aggregate\n");
+ return 0;
+ }
+
+ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
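[Editor's note, not part of the patch] Since the per-bank boot_aggregate is what a remote verifier has to recompute, a userspace sketch of the calculation may help. It is illustrative only: it assumes the per-PCR sysfs files under /sys/class/tpm/tpm0/pcr-sha256/ are available on the running system and uses OpenSSL's SHA-256; PCRs 8-9 are included because the digest is non-SHA1, matching ima_calc_boot_aggregate_tfm() above.

#include <stdio.h>
#include <openssl/sha.h>

/* Illustrative verifier sketch: recompute the sha256 boot_aggregate from
 * the kernel's per-PCR sysfs files (the path is an assumption about the
 * running system, not something this patch exports). */
static int read_pcr_sha256(int idx, unsigned char pcr[32])
{
        char path[64], hex[65];
        FILE *f;
        int i, n;

        snprintf(path, sizeof(path), "/sys/class/tpm/tpm0/pcr-sha256/%d", idx);
        f = fopen(path, "r");
        if (!f)
                return -1;
        n = fscanf(f, "%64s", hex);
        fclose(f);
        if (n != 1)
                return -1;
        for (i = 0; i < 32; i++)
                sscanf(&hex[2 * i], "%2hhx", &pcr[i]);
        return 0;
}

int main(void)
{
        unsigned char pcr[32], digest[SHA256_DIGEST_LENGTH];
        SHA256_CTX ctx;
        int i;

        SHA256_Init(&ctx);
        /* PCRs 0-7 always; 8-9 only for non-SHA1 banks, as in the kernel. */
        for (i = 0; i < 10; i++) {
                if (read_pcr_sha256(i, pcr))
                        return 1;
                SHA256_Update(&ctx, pcr, sizeof(pcr));
        }
        SHA256_Final(digest, &ctx);
        for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
                printf("%02x", digest[i]);
        putchar('\n');
        return 0;
}

The result should match the d-ng digest of the boot_aggregate entry in the sha256 ascii measurement list.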
diff --git a/security/integrity/ima/ima_efi.c b/security/integrity/ima/ima_efi.c
new file mode 100644
index 000000000000..138029bfcce1
--- /dev/null
+++ b/security/integrity/ima/ima_efi.c
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 IBM Corporation
+ */
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/ima.h>
+#include <asm/efi.h>
+
+#ifndef arch_ima_efi_boot_mode
+#define arch_ima_efi_boot_mode efi_secureboot_mode_unset
+#endif
+
+static enum efi_secureboot_mode get_sb_mode(void)
+{
+ enum efi_secureboot_mode mode;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) {
+ pr_info("ima: secureboot mode unknown, no efi\n");
+ return efi_secureboot_mode_unknown;
+ }
+
+ mode = efi_get_secureboot_mode(efi.get_variable);
+ if (mode == efi_secureboot_mode_disabled)
+ pr_info("ima: secureboot mode disabled\n");
+ else if (mode == efi_secureboot_mode_unknown)
+ pr_info("ima: secureboot mode unknown\n");
+ else
+ pr_info("ima: secureboot mode enabled\n");
+ return mode;
+}
+
+bool arch_ima_get_secureboot(void)
+{
+ static enum efi_secureboot_mode sb_mode;
+ static bool initialized;
+
+ if (!initialized && efi_enabled(EFI_BOOT)) {
+ sb_mode = arch_ima_efi_boot_mode;
+
+ if (sb_mode == efi_secureboot_mode_unset)
+ sb_mode = get_sb_mode();
+ initialized = true;
+ }
+
+ if (sb_mode == efi_secureboot_mode_enabled)
+ return true;
+ else
+ return false;
+}
+
+/* secureboot arch rules */
+static const char * const sb_arch_rules[] = {
+#if !IS_ENABLED(CONFIG_KEXEC_SIG)
+ "appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig",
+#endif /* CONFIG_KEXEC_SIG */
+ "measure func=KEXEC_KERNEL_CHECK",
+#if !IS_ENABLED(CONFIG_MODULE_SIG)
+ "appraise func=MODULE_CHECK appraise_type=imasig",
+#endif
+#if IS_ENABLED(CONFIG_INTEGRITY_MACHINE_KEYRING) && IS_ENABLED(CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY)
+ "appraise func=POLICY_CHECK appraise_type=imasig",
+#endif
+ "measure func=MODULE_CHECK",
+ NULL
+};
+
+const char * const *arch_get_ima_policy(void)
+{
+ if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) {
+ if (IS_ENABLED(CONFIG_MODULE_SIG))
+ set_module_sig_enforced();
+ if (IS_ENABLED(CONFIG_KEXEC_SIG))
+ set_kexec_sig_enforced();
+ return sb_arch_rules;
+ }
+ return NULL;
+}
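[Editor's note, not part of the patch] get_sb_mode() above is only consulted when the architecture does not report the secure boot state itself. For comparison, a userspace sketch that reads the same EFI variable through efivarfs; it is illustrative only, assumes the standard EFI global-variable GUID and that efivarfs is mounted.

#include <stdio.h>

/* Illustrative only: an efivarfs file starts with a 4-byte attribute
 * word, followed by the variable data (one byte for SecureBoot). */
int main(void)
{
        unsigned char buf[5];
        FILE *f = fopen("/sys/firmware/efi/efivars/"
                        "SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c", "rb");

        if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
                puts("secureboot mode unknown");
                return 1;
        }
        fclose(f);
        puts(buf[4] == 1 ? "secureboot mode enabled"
                         : "secureboot mode disabled");
        return 0;
}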
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 38477c9c3415..012a58959ff0 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
@@ -6,34 +7,44 @@
* Reiner Sailer <sailer@us.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
* File: ima_fs.c
* implements security file system for reporting
* current measurement list and IMA statistics
*/
+
#include <linux/fcntl.h>
+#include <linux/kernel_read_file.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/parser.h>
+#include <linux/vmalloc.h>
#include "ima.h"
+static DEFINE_MUTEX(ima_write_mutex);
+
+bool ima_canonical_fmt;
+static int __init default_canonical_fmt_setup(char *str)
+{
+#ifdef __BIG_ENDIAN
+ ima_canonical_fmt = true;
+#endif
+ return 1;
+}
+__setup("ima_canonical_fmt", default_canonical_fmt_setup);
+
static int valid_policy = 1;
-#define TMPBUFLEN 12
+
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
loff_t *ppos, atomic_long_t *val)
{
- char tmpbuf[TMPBUFLEN];
+ char tmpbuf[32]; /* greater than largest 'long' string value */
ssize_t len;
- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
}
@@ -88,8 +99,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
* against concurrent list-extension
*/
rcu_read_lock();
- qe = list_entry_rcu(qe->later.next,
- struct ima_queue_entry, later);
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
@@ -100,7 +110,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v)
{
}
-static void ima_putc(struct seq_file *m, void *data, int datalen)
+void ima_putc(struct seq_file *m, void *data, int datalen)
{
while (datalen--)
seq_putc(m, *(char *)data++);
@@ -108,44 +118,80 @@ static void ima_putc(struct seq_file *m, void *data, int datalen)
/* print format:
* 32bit-le=pcr#
- * char[20]=template digest
+ * char[n]=template digest
* 32bit-le=template name size
* char[n]=template name
+ * [eventdata length]
* eventdata[n]=template specific data
*/
-static int ima_measurements_show(struct seq_file *m, void *v)
+int ima_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
- int namelen;
- u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ char *template_name;
+ u32 pcr, namelen, template_data_len; /* temporary fields */
+ bool is_ima_template = false;
+ enum hash_algo algo;
+ int i, algo_idx;
+
+ algo_idx = ima_sha1_idx;
+ algo = HASH_ALGO_SHA1;
+
+ if (m->file != NULL) {
+ algo_idx = (unsigned long)file_inode(m->file)->i_private;
+ algo = ima_algo_array[algo_idx].algo;
+ }
/* get entry */
e = qe->entry;
if (e == NULL)
return -1;
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
/*
* 1st: PCRIndex
- * PCR used is always the same (config option) in
- * little-endian format
+ * PCR used defaults to the same (config option) in
+ * little-endian format, unless set in policy
*/
- ima_putc(m, &pcr, sizeof pcr);
+ pcr = !ima_canonical_fmt ? e->pcr : (__force u32)cpu_to_le32(e->pcr);
+ ima_putc(m, &pcr, sizeof(e->pcr));
/* 2nd: template digest */
- ima_putc(m, e->digest, IMA_DIGEST_SIZE);
+ ima_putc(m, e->digests[algo_idx].digest, hash_digest_size[algo]);
/* 3rd: template name size */
- namelen = strlen(e->template_name);
- ima_putc(m, &namelen, sizeof namelen);
+ namelen = !ima_canonical_fmt ? strlen(template_name) :
+ (__force u32)cpu_to_le32(strlen(template_name));
+ ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
- ima_putc(m, (void *)e->template_name, namelen);
+ ima_putc(m, template_name, strlen(template_name));
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
+ is_ima_template = true;
- /* 5th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_BINARY);
+ if (!is_ima_template) {
+ template_data_len = !ima_canonical_fmt ? e->template_data_len :
+ (__force u32)cpu_to_le32(e->template_data_len);
+ ima_putc(m, &template_data_len, sizeof(e->template_data_len));
+ }
+
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ enum ima_show_type show = IMA_SHOW_BINARY;
+ const struct ima_template_field *field =
+ e->template_desc->fields[i];
+
+ if (is_ima_template && strcmp(field->field_id, "d") == 0)
+ show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+ if (is_ima_template && strcmp(field->field_id, "n") == 0)
+ show = IMA_SHOW_BINARY_OLD_STRING_FMT;
+ field->field_show(m, show, &e->template_data[i]);
+ }
return 0;
}
@@ -168,59 +214,59 @@ static const struct file_operations ima_measurements_ops = {
.release = seq_release,
};
-static void ima_print_digest(struct seq_file *m, u8 *digest)
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
{
- int i;
+ u32 i;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
+ for (i = 0; i < size; i++)
seq_printf(m, "%02x", *(digest + i));
}
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
-{
- struct ima_template_data *entry = e;
- int namelen;
-
- switch (show) {
- case IMA_SHOW_ASCII:
- ima_print_digest(m, entry->digest);
- seq_printf(m, " %s\n", entry->file_name);
- break;
- case IMA_SHOW_BINARY:
- ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
-
- namelen = strlen(entry->file_name);
- ima_putc(m, &namelen, sizeof namelen);
- ima_putc(m, entry->file_name, namelen);
- default:
- break;
- }
-}
-
/* print in ascii */
static int ima_ascii_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ char *template_name;
+ enum hash_algo algo;
+ int i, algo_idx;
+
+ algo_idx = ima_sha1_idx;
+ algo = HASH_ALGO_SHA1;
+
+ if (m->file != NULL) {
+ algo_idx = (unsigned long)file_inode(m->file)->i_private;
+ algo = ima_algo_array[algo_idx].algo;
+ }
/* get entry */
e = qe->entry;
if (e == NULL)
return -1;
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
/* 1st: PCR used (config option) */
- seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
+ seq_printf(m, "%2d ", e->pcr);
- /* 2nd: SHA1 template hash */
- ima_print_digest(m, e->digest);
+ /* 2nd: template hash */
+ ima_print_digest(m, e->digests[algo_idx].digest, hash_digest_size[algo]);
/* 3th: template name */
- seq_printf(m, " %s ", e->template_name);
+ seq_printf(m, " %s", template_name);
/* 4th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_ASCII);
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
return 0;
}
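For orientation, each line emitted by the ascii list above has the column layout (one entry per line, fields separated by spaces):

  <PCR>  <template digest in hex>  <template name>  <template field 1> <template field 2> ...

so, for example, an 'ima-ng' entry carries the file digest (prefixed with its algorithm, e.g. "sha256:") and the pathname as its two template fields.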
@@ -243,10 +289,50 @@ static const struct file_operations ima_ascii_measurements_ops = {
.release = seq_release,
};
+static ssize_t ima_read_policy(char *path)
+{
+ void *data = NULL;
+ char *datap;
+ size_t size;
+ int rc, pathlen = strlen(path);
+
+ char *p;
+
+ /* remove \n */
+ datap = path;
+ strsep(&datap, "\n");
+
+ rc = kernel_read_file_from_path(path, 0, &data, INT_MAX, NULL,
+ READING_POLICY);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
+ return rc;
+ }
+ size = rc;
+ rc = 0;
+
+ datap = data;
+ while (size > 0 && (p = strsep(&datap, "\n"))) {
+ pr_debug("rule: %s\n", p);
+ rc = ima_parse_add_rule(p);
+ if (rc < 0)
+ break;
+ size -= rc;
+ }
+
+ vfree(data);
+ if (rc < 0)
+ return rc;
+ else if (size)
+ return -EINVAL;
+ else
+ return pathlen;
+}
+
static ssize_t ima_write_policy(struct file *file, const char __user *buf,
size_t datalen, loff_t *ppos)
{
- char *data = NULL;
+ char *data;
ssize_t result;
if (datalen >= PAGE_SIZE)
@@ -257,44 +343,106 @@ static ssize_t ima_write_policy(struct file *file, const char __user *buf,
if (*ppos != 0)
goto out;
- result = -ENOMEM;
- data = kmalloc(datalen + 1, GFP_KERNEL);
- if (!data)
- goto out;
-
- *(data + datalen) = '\0';
-
- result = -EFAULT;
- if (copy_from_user(data, buf, datalen))
+ data = memdup_user_nul(buf, datalen);
+ if (IS_ERR(data)) {
+ result = PTR_ERR(data);
goto out;
+ }
- result = ima_parse_add_rule(data);
+ result = mutex_lock_interruptible(&ima_write_mutex);
+ if (result < 0)
+ goto out_free;
+
+ if (data[0] == '/') {
+ result = ima_read_policy(data);
+ } else if (ima_appraise & IMA_APPRAISE_POLICY) {
+ pr_err("signed policy file (specified as an absolute pathname) required\n");
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", "signed policy required",
+ 1, 0);
+ result = -EACCES;
+ } else {
+ result = ima_parse_add_rule(data);
+ }
+ mutex_unlock(&ima_write_mutex);
+out_free:
+ kfree(data);
out:
if (result < 0)
valid_policy = 0;
- kfree(data);
+
return result;
}
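As a usage illustration (editorial sketch, not part of the patch) of the two update modes handled above: userspace either writes an individual rule, or writes an absolute pathname so the kernel itself reads, and if required appraises, the policy file via ima_read_policy(). The securityfs mount point below is the conventional one and is an assumption.

/* Hypothetical userspace feeder for the IMA policy interface. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *rule = "measure func=BPRM_CHECK\n";	/* one rule per write() */
	int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Alternatively, write an absolute pathname such as
	 * "/etc/ima/ima-policy"; the kernel then loads the rules from that
	 * file (a signed policy, if IMA_APPRAISE_POLICY is enforced).
	 */
	if (write(fd, rule, strlen(rule)) < 0)
		perror("write");
	close(fd);
	return 0;
}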
static struct dentry *ima_dir;
-static struct dentry *binary_runtime_measurements;
-static struct dentry *ascii_runtime_measurements;
-static struct dentry *runtime_measurements_count;
-static struct dentry *violations;
-static struct dentry *ima_policy;
+static struct dentry *ima_symlink;
+
+enum ima_fs_flags {
+ IMA_FS_BUSY,
+};
+
+static unsigned long ima_fs_flags;
+
+#ifdef CONFIG_IMA_READ_POLICY
+static const struct seq_operations ima_policy_seqops = {
+ .start = ima_policy_start,
+ .next = ima_policy_next,
+ .stop = ima_policy_stop,
+ .show = ima_policy_show,
+};
+#endif
+
+static int __init create_securityfs_measurement_lists(void)
+{
+ int count = NR_BANKS(ima_tpm_chip);
+
+ if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip))
+ count++;
+
+ for (int i = 0; i < count; i++) {
+ u16 algo = ima_algo_array[i].algo;
+ char file_name[NAME_MAX + 1];
+ struct dentry *dentry;
+
+ sprintf(file_name, "ascii_runtime_measurements_%s",
+ hash_algo_name[algo]);
+ dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
+ ima_dir, (void *)(uintptr_t)i,
+ &ima_ascii_measurements_ops);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ sprintf(file_name, "binary_runtime_measurements_%s",
+ hash_algo_name[algo]);
+ dentry = securityfs_create_file(file_name, S_IRUSR | S_IRGRP,
+ ima_dir, (void *)(uintptr_t)i,
+ &ima_measurements_ops);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ }
+
+ return 0;
+}
-static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
-static int ima_open_policy(struct inode * inode, struct file * filp)
+static int ima_open_policy(struct inode *inode, struct file *filp)
{
- /* No point in being allowed to open it if you aren't going to write */
- if (!(filp->f_flags & O_WRONLY))
+ if (!(filp->f_flags & O_WRONLY)) {
+#ifndef CONFIG_IMA_READ_POLICY
return -EACCES;
- if (atomic_dec_and_test(&policy_opencount))
- return 0;
- return -EBUSY;
+#else
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ return -EACCES;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return seq_open(filp, &ima_policy_seqops);
+#endif
+ }
+ if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags))
+ return -EBUSY;
+ return 0;
}
/*
@@ -306,72 +454,114 @@ static int ima_open_policy(struct inode * inode, struct file * filp)
*/
static int ima_release_policy(struct inode *inode, struct file *file)
{
+ const char *cause = valid_policy ? "completed" : "failed";
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return seq_release(inode, file);
+
+ if (valid_policy && ima_check_policy() < 0) {
+ cause = "failed";
+ valid_policy = 0;
+ }
+
+ pr_info("policy update %s\n", cause);
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", cause, !valid_policy, 0);
+
if (!valid_policy) {
ima_delete_rules();
valid_policy = 1;
- atomic_set(&policy_opencount, 1);
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
return 0;
}
+
ima_update_policy();
- securityfs_remove(ima_policy);
- ima_policy = NULL;
+#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY)
+ securityfs_remove(file->f_path.dentry);
+#elif defined(CONFIG_IMA_WRITE_POLICY)
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+#elif defined(CONFIG_IMA_READ_POLICY)
+ inode->i_mode &= ~S_IWUSR;
+#endif
return 0;
}
static const struct file_operations ima_measure_policy_ops = {
.open = ima_open_policy,
.write = ima_write_policy,
+ .read = seq_read,
.release = ima_release_policy,
.llseek = generic_file_llseek,
};
int __init ima_fs_init(void)
{
- ima_dir = securityfs_create_dir("ima", NULL);
- if (IS_ERR(ima_dir))
- return -1;
+ struct dentry *dentry;
+ int ret;
- binary_runtime_measurements =
- securityfs_create_file("binary_runtime_measurements",
- S_IRUSR | S_IRGRP, ima_dir, NULL,
- &ima_measurements_ops);
- if (IS_ERR(binary_runtime_measurements))
+ ret = integrity_fs_init();
+ if (ret < 0)
+ return ret;
+
+ ima_dir = securityfs_create_dir("ima", integrity_dir);
+ if (IS_ERR(ima_dir)) {
+ ret = PTR_ERR(ima_dir);
+ goto out;
+ }
+
+ ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima",
+ NULL);
+ if (IS_ERR(ima_symlink)) {
+ ret = PTR_ERR(ima_symlink);
goto out;
+ }
- ascii_runtime_measurements =
- securityfs_create_file("ascii_runtime_measurements",
- S_IRUSR | S_IRGRP, ima_dir, NULL,
- &ima_ascii_measurements_ops);
- if (IS_ERR(ascii_runtime_measurements))
+ ret = create_securityfs_measurement_lists();
+ if (ret != 0)
goto out;
- runtime_measurements_count =
- securityfs_create_file("runtime_measurements_count",
+ dentry = securityfs_create_symlink("binary_runtime_measurements", ima_dir,
+ "binary_runtime_measurements_sha1", NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = securityfs_create_symlink("ascii_runtime_measurements", ima_dir,
+ "ascii_runtime_measurements_sha1", NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = securityfs_create_file("runtime_measurements_count",
S_IRUSR | S_IRGRP, ima_dir, NULL,
&ima_measurements_count_ops);
- if (IS_ERR(runtime_measurements_count))
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
+ }
- violations =
- securityfs_create_file("violations", S_IRUSR | S_IRGRP,
+ dentry = securityfs_create_file("violations", S_IRUSR | S_IRGRP,
ima_dir, NULL, &ima_htable_violations_ops);
- if (IS_ERR(violations))
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
+ }
- ima_policy = securityfs_create_file("policy",
- S_IWUSR,
+ dentry = securityfs_create_file("policy", POLICY_FILE_FLAGS,
ima_dir, NULL,
&ima_measure_policy_ops);
- if (IS_ERR(ima_policy))
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto out;
+ }
return 0;
out:
- securityfs_remove(violations);
- securityfs_remove(runtime_measurements_count);
- securityfs_remove(ascii_runtime_measurements);
- securityfs_remove(binary_runtime_measurements);
+ securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
- securityfs_remove(ima_policy);
- return -1;
+ integrity_fs_fini();
+
+ return ret;
}
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
new file mode 100644
index 000000000000..00b249101f98
--- /dev/null
+++ b/security/integrity/ima/ima_iint.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * File: ima_iint.c
+ * - implements the IMA hook: ima_inode_free
+ * - cache integrity information in the inode security blob
+ */
+#include <linux/slab.h>
+
+#include "ima.h"
+
+static struct kmem_cache *ima_iint_cache __ro_after_init;
+
+/**
+ * ima_iint_find - Return the iint associated with an inode
+ * @inode: Pointer to the inode
+ *
+ * Return the IMA integrity information (iint) associated with an inode, if the
+ * inode was processed by IMA.
+ *
+ * Return: Found iint or NULL.
+ */
+struct ima_iint_cache *ima_iint_find(struct inode *inode)
+{
+ if (!IS_IMA(inode))
+ return NULL;
+
+ return ima_inode_get_iint(inode);
+}
+
+#define IMA_MAX_NESTING (FILESYSTEM_MAX_STACK_DEPTH + 1)
+
+/*
+ * It is not clear that IMA should be nested at all, but as long as it measures
+ * files both on overlayfs and on underlying fs, we need to annotate the iint
+ * mutex to avoid lockdep false positives related to IMA + overlayfs.
+ * See ovl_lockdep_annotate_inode_mutex_key() for more details.
+ */
+static inline void ima_iint_lockdep_annotate(struct ima_iint_cache *iint,
+ struct inode *inode)
+{
+#ifdef CONFIG_LOCKDEP
+ static struct lock_class_key ima_iint_mutex_key[IMA_MAX_NESTING];
+
+ int depth = inode->i_sb->s_stack_depth;
+
+ if (WARN_ON_ONCE(depth < 0 || depth >= IMA_MAX_NESTING))
+ depth = 0;
+
+ lockdep_set_class(&iint->mutex, &ima_iint_mutex_key[depth]);
+#endif
+}
+
+static void ima_iint_init_always(struct ima_iint_cache *iint,
+ struct inode *inode)
+{
+ iint->ima_hash = NULL;
+ iint->real_inode.version = 0;
+ iint->flags = 0UL;
+ iint->atomic_flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_read_status = INTEGRITY_UNKNOWN;
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
+ mutex_init(&iint->mutex);
+ ima_iint_lockdep_annotate(iint, inode);
+}
+
+static void ima_iint_free(struct ima_iint_cache *iint)
+{
+ kfree(iint->ima_hash);
+ mutex_destroy(&iint->mutex);
+ kmem_cache_free(ima_iint_cache, iint);
+}
+
+/**
+ * ima_inode_get - Find or allocate an iint associated with an inode
+ * @inode: Pointer to the inode
+ *
+ * Find an iint associated with an inode, and allocate a new one if not found.
+ * Caller must lock i_mutex.
+ *
+ * Return: An iint on success, NULL on error.
+ */
+struct ima_iint_cache *ima_inode_get(struct inode *inode)
+{
+ struct ima_iint_cache *iint;
+
+ iint = ima_iint_find(inode);
+ if (iint)
+ return iint;
+
+ iint = kmem_cache_alloc(ima_iint_cache, GFP_NOFS);
+ if (!iint)
+ return NULL;
+
+ ima_iint_init_always(iint, inode);
+
+ inode->i_flags |= S_IMA;
+ ima_inode_set_iint(inode, iint);
+
+ return iint;
+}
+
+/**
+ * ima_inode_free_rcu - Called to free an inode via a RCU callback
+ * @inode_security: The inode->i_security pointer
+ *
+ * Free the IMA data associated with an inode.
+ */
+void ima_inode_free_rcu(void *inode_security)
+{
+ struct ima_iint_cache **iint_p = inode_security + ima_blob_sizes.lbs_inode;
+
+ /* *iint_p should be NULL if !IS_IMA(inode) */
+ if (*iint_p)
+ ima_iint_free(*iint_p);
+}
+
+static void ima_iint_init_once(void *foo)
+{
+ struct ima_iint_cache *iint = (struct ima_iint_cache *)foo;
+
+ memset(iint, 0, sizeof(*iint));
+}
+
+void __init ima_iintcache_init(void)
+{
+ ima_iint_cache =
+ kmem_cache_create("ima_iint_cache", sizeof(struct ima_iint_cache),
+ 0, SLAB_PANIC, ima_iint_init_once);
+}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 162ea723db3d..a2f34f2d8ad7 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
@@ -6,28 +7,27 @@
* Leendert van Doorn <leendert@watson.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
* File: ima_init.c
* initialization and cleanup functions
*/
-#include <linux/module.h>
+
+#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/ima.h>
+#include <generated/utsrelease.h>
+
#include "ima.h"
/* name for boot aggregate entry */
-static const char *boot_aggregate_name = "boot_aggregate";
-int ima_used_chip;
+const char boot_aggregate_name[] = "boot_aggregate";
+struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
* the PCR register.
*
- * Calculate the boot aggregate, a SHA1 over tpm registers 0-7,
+ * Calculate the boot aggregate, a hash over tpm registers 0-7,
* assuming a TPM chip exists, and zeroes if the TPM chip does not
* exist. Add the boot aggregate measurement to the measurement
* list and extend the PCR register.
@@ -39,57 +39,124 @@ int ima_used_chip;
* a different value.) Violations add a zero entry to the measurement
* list and extend the aggregate PCR value with ff...ff's.
*/
-static void __init ima_add_boot_aggregate(void)
+static int __init ima_add_boot_aggregate(void)
{
- struct ima_template_entry *entry;
- const char *op = "add_boot_aggregate";
+ static const char op[] = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry;
+ struct ima_iint_cache tmp_iint, *iint = &tmp_iint;
+ struct ima_event_data event_data = { .iint = iint,
+ .filename = boot_aggregate_name };
+ struct ima_max_digest_data hash;
+ struct ima_digest_data *hash_hdr = container_of(&hash.hdr,
+ struct ima_digest_data, hdr);
int result = -ENOMEM;
- int violation = 1;
+ int violation = 0;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto err_out;
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = hash_hdr;
+ iint->ima_hash->algo = ima_hash_algo;
+ iint->ima_hash->length = hash_digest_size[ima_hash_algo];
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, boot_aggregate_name,
- IMA_EVENT_NAME_LEN_MAX);
- if (ima_used_chip) {
- violation = 0;
- result = ima_calc_boot_aggregate(entry->template.digest);
+ /*
+ * With TPM 2.0 hash agility, TPM chips could support multiple TPM
+ * PCR banks, allowing firmware to configure and enable different
+ * banks. The SHA1 bank is not necessarily enabled.
+ *
+ * Use the same hash algorithm for reading the TPM PCRs as for
+ * calculating the boot aggregate digest. Preference is given to
+ * the configured IMA default hash algorithm. Otherwise, use the
+ * TCG required banks - SHA256 for TPM 2.0, SHA1 for TPM 1.2.
+ * Ultimately select SHA1 also for TPM 2.0 if the SHA256 PCR bank
+ * is not found.
+ */
+ if (ima_tpm_chip) {
+ result = ima_calc_boot_aggregate(hash_hdr);
if (result < 0) {
audit_cause = "hashing_error";
- kfree(entry);
goto err_out;
}
}
- result = ima_store_template(entry, violation, NULL);
- if (result < 0)
- kfree(entry);
- return;
+
+ result = ima_alloc_init_template(&event_data, &entry, NULL);
+ if (result < 0) {
+ audit_cause = "alloc_entry";
+ goto err_out;
+ }
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name,
+ CONFIG_IMA_MEASURE_PCR_IDX);
+ if (result < 0) {
+ ima_free_template_entry(entry);
+ audit_cause = "store_entry";
+ goto err_out;
+ }
+ return 0;
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
audit_cause, result, 0);
+ return result;
}
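As a rough, editorial illustration of the boot aggregate described in the comment above (a hash over TPM PCRs 0-7, or zeroes without a TPM chip): the real work happens in ima_calc_boot_aggregate() in ima_crypto.c, which is not part of this hunk. In the sketch, read_pcr_bank() is a hypothetical helper standing in for the TPM PCR read; the crypto_shash_*() calls come from <crypto/hash.h>.

/* Conceptual in-kernel sketch only; read_pcr_bank() is hypothetical. */
static int boot_aggregate_sketch(struct shash_desc *desc, enum hash_algo algo)
{
	u8 pcr[IMA_MAX_DIGEST_SIZE];
	int i, rc;

	rc = crypto_shash_init(desc);
	for (i = 0; !rc && i < 8; i++) {
		/* read PCR i from the bank matching 'algo' */
		rc = read_pcr_bank(ima_tpm_chip, i, algo, pcr);
		if (!rc)
			rc = crypto_shash_update(desc, pcr,
						 hash_digest_size[algo]);
	}
	if (!rc)
		rc = crypto_shash_final(desc, pcr);	/* the boot aggregate */
	return rc;
}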
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void)
+{
+ int unset_flags = ima_policy_flag & IMA_APPRAISE;
+
+ ima_policy_flag &= ~unset_flags;
+ integrity_load_x509(INTEGRITY_KEYRING_IMA, CONFIG_IMA_X509_PATH);
+
+ /* load also EVM key to avoid appraisal */
+ evm_load_x509();
+
+ ima_policy_flag |= unset_flags;
+}
+#endif
+
int __init ima_init(void)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
int rc;
- ima_used_chip = 0;
- rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_i);
- if (rc == 0)
- ima_used_chip = 1;
+ ima_tpm_chip = tpm_default_chip();
+ if (!ima_tpm_chip)
+ pr_info("No TPM chip found, activating TPM-bypass!\n");
- if (!ima_used_chip)
- pr_info("IMA: No TPM chip found, activating TPM-bypass!\n");
+ rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
+ if (rc)
+ return rc;
rc = ima_init_crypto();
if (rc)
return rc;
- ima_add_boot_aggregate(); /* boot aggregate must be first entry */
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
+
+	/* It can be called before ima_init_digests(), as it does not use the TPM. */
+ ima_load_kexec_buffer();
+
+ rc = ima_init_digests();
+ if (rc != 0)
+ return rc;
+ rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
+ if (rc != 0)
+ return rc;
+
ima_init_policy();
- return ima_fs_init();
+ rc = ima_fs_init();
+ if (rc != 0)
+ return rc;
+
+ ima_init_key_queue();
+
+ ima_init_reboot_notifier();
+
+ ima_measure_critical_data("kernel_info", "kernel_version",
+ UTS_RELEASE, strlen(UTS_RELEASE), false,
+ NULL, 0);
+
+ return rc;
}
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
new file mode 100644
index 000000000000..7362f68f2d8b
--- /dev/null
+++ b/security/integrity/ima/ima_kexec.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ * Mimi Zohar <zohar@linux.vnet.ibm.com>
+ */
+
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/kexec.h>
+#include <linux/of.h>
+#include <linux/ima.h>
+#include <linux/reboot.h>
+#include <asm/page.h>
+#include "ima.h"
+
+#ifdef CONFIG_IMA_KEXEC
+#define IMA_KEXEC_EVENT_LEN 256
+
+static bool ima_kexec_update_registered;
+static struct seq_file ima_kexec_file;
+static size_t kexec_segment_size;
+static void *ima_kexec_buffer;
+
+static void ima_free_kexec_file_buf(struct seq_file *sf)
+{
+ vfree(sf->buf);
+ sf->buf = NULL;
+ sf->size = 0;
+ sf->read_pos = 0;
+ sf->count = 0;
+}
+
+void ima_measure_kexec_event(const char *event_name)
+{
+ char ima_kexec_event[IMA_KEXEC_EVENT_LEN];
+ size_t buf_size = 0;
+ long len;
+ int n;
+
+ buf_size = ima_get_binary_runtime_size();
+ len = atomic_long_read(&ima_htable.len);
+
+ n = scnprintf(ima_kexec_event, IMA_KEXEC_EVENT_LEN,
+ "kexec_segment_size=%lu;ima_binary_runtime_size=%lu;"
+ "ima_runtime_measurements_count=%ld;",
+ kexec_segment_size, buf_size, len);
+
+ ima_measure_critical_data("ima_kexec", event_name, ima_kexec_event, n, false, NULL, 0);
+}
+
+static int ima_alloc_kexec_file_buf(size_t segment_size)
+{
+ /*
+ * kexec 'load' may be called multiple times.
+ * Free and realloc the buffer only if the segment_size is
+ * changed from the previous kexec 'load' call.
+ */
+ if (ima_kexec_file.buf && ima_kexec_file.size == segment_size)
+ goto out;
+
+ ima_free_kexec_file_buf(&ima_kexec_file);
+
+ /* segment size can't change between kexec load and execute */
+ ima_kexec_file.buf = vmalloc(segment_size);
+ if (!ima_kexec_file.buf)
+ return -ENOMEM;
+
+ ima_kexec_file.size = segment_size;
+
+out:
+ ima_kexec_file.read_pos = 0;
+ ima_kexec_file.count = sizeof(struct ima_kexec_hdr); /* reserved space */
+ ima_measure_kexec_event("kexec_load");
+
+ return 0;
+}
+
+static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+{
+ struct ima_queue_entry *qe;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+ if (!ima_kexec_file.buf) {
+ pr_err("Kexec file buf not allocated\n");
+ return -EINVAL;
+ }
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ /* This is an append-only list, no need to hold the RCU read lock */
+ list_for_each_entry_rcu(qe, &ima_measurements, later, true) {
+ if (ima_kexec_file.count < ima_kexec_file.size) {
+ khdr.count++;
+ ima_measurements_show(&ima_kexec_file, qe);
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ /*
+ * fill in reserved space with some buffer details
+ * (eg. version, buffer size, number of measurements)
+ */
+ khdr.buffer_size = ima_kexec_file.count;
+ if (ima_canonical_fmt) {
+ khdr.version = cpu_to_le16(khdr.version);
+ khdr.count = cpu_to_le64(khdr.count);
+ khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+ }
+ memcpy(ima_kexec_file.buf, &khdr, sizeof(khdr));
+
+ print_hex_dump_debug("ima dump: ", DUMP_PREFIX_NONE, 16, 1,
+ ima_kexec_file.buf, ima_kexec_file.count < 100 ?
+ ima_kexec_file.count : 100,
+ true);
+
+ *buffer_size = ima_kexec_file.count;
+ *buffer = ima_kexec_file.buf;
+
+ return ret;
+}
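The reserved space filled in above holds a small header describing the serialized list. The sketch below shows its rough layout; the field widths follow the cpu_to_le16()/cpu_to_le64() conversions in this function, and the padding fields are an assumption.

/* Sketch of the header written at the start of the kexec segment. */
struct ima_kexec_hdr_sketch {
	u16 version;		/* format version, currently 1 */
	u16 _reserved0;		/* assumed padding */
	u32 _reserved1;		/* assumed padding */
	u64 buffer_size;	/* bytes actually used, including this header */
	u64 count;		/* number of serialized measurement entries */
};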
+
+/*
+ * Called during kexec_file_load so that IMA can add a segment to the kexec
+ * image for the measurement list for the next kernel.
+ *
+ * This function assumes that kexec_lock is held.
+ */
+void ima_add_kexec_buffer(struct kimage *image)
+{
+ struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
+ .buf_min = 0, .buf_max = ULONG_MAX,
+ .top_down = true };
+ unsigned long binary_runtime_size;
+ unsigned long extra_memory;
+
+ /* use more understandable variable names than defined in kbuf */
+ size_t kexec_buffer_size = 0;
+ void *kexec_buffer = NULL;
+ int ret;
+
+ if (image->type == KEXEC_TYPE_CRASH)
+ return;
+
+ /*
+ * Reserve extra memory for measurements added during kexec.
+ */
+ if (CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB <= 0)
+ extra_memory = PAGE_SIZE / 2;
+ else
+ extra_memory = CONFIG_IMA_KEXEC_EXTRA_MEMORY_KB * 1024;
+
+ binary_runtime_size = ima_get_binary_runtime_size() + extra_memory;
+
+ if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
+ kexec_segment_size = ULONG_MAX;
+ else
+ kexec_segment_size = ALIGN(binary_runtime_size, PAGE_SIZE);
+
+ if ((kexec_segment_size == ULONG_MAX) ||
+ ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages() / 2)) {
+ pr_err("Binary measurement list too large.\n");
+ return;
+ }
+
+ ret = ima_alloc_kexec_file_buf(kexec_segment_size);
+ if (ret < 0) {
+ pr_err("Not enough memory for the kexec measurement buffer.\n");
+ return;
+ }
+
+ kbuf.buffer = kexec_buffer;
+ kbuf.bufsz = kexec_buffer_size;
+ kbuf.memsz = kexec_segment_size;
+ image->is_ima_segment_index_set = false;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ vfree(kexec_buffer);
+ return;
+ }
+
+ image->ima_buffer_addr = kbuf.mem;
+ image->ima_buffer_size = kexec_segment_size;
+ image->ima_buffer = kexec_buffer;
+ image->ima_segment_index = image->nr_segments - 1;
+ image->is_ima_segment_index_set = true;
+
+ kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+}
+
+/*
+ * Called during kexec execute so that IMA can update the measurement list.
+ */
+static int ima_update_kexec_buffer(struct notifier_block *self,
+ unsigned long action, void *data)
+{
+ size_t buf_size = 0;
+ int ret = NOTIFY_OK;
+ void *buf = NULL;
+
+ if (!kexec_in_progress) {
+ pr_info("No kexec in progress.\n");
+ return ret;
+ }
+
+ if (!ima_kexec_buffer) {
+ pr_err("Kexec buffer not set.\n");
+ return ret;
+ }
+
+ ret = ima_dump_measurement_list(&buf_size, &buf, kexec_segment_size);
+
+ if (ret)
+ pr_err("Dump measurements failed. Error:%d\n", ret);
+
+ if (buf_size != 0)
+ memcpy(ima_kexec_buffer, buf, buf_size);
+
+ kimage_unmap_segment(ima_kexec_buffer);
+ ima_kexec_buffer = NULL;
+
+ return ret;
+}
+
+static struct notifier_block update_buffer_nb = {
+ .notifier_call = ima_update_kexec_buffer,
+ .priority = INT_MIN
+};
+
+/*
+ * Create a mapping for the source pages that contain the IMA buffer
+ * so we can update it later.
+ */
+void ima_kexec_post_load(struct kimage *image)
+{
+ if (ima_kexec_buffer) {
+ kimage_unmap_segment(ima_kexec_buffer);
+ ima_kexec_buffer = NULL;
+ }
+
+ if (!image->ima_buffer_addr)
+ return;
+
+ ima_kexec_buffer = kimage_map_segment(image,
+ image->ima_buffer_addr,
+ image->ima_buffer_size);
+ if (!ima_kexec_buffer) {
+ pr_err("Could not map measurements buffer.\n");
+ return;
+ }
+
+ if (!ima_kexec_update_registered) {
+ register_reboot_notifier(&update_buffer_nb);
+ ima_kexec_update_registered = true;
+ }
+}
+
+#endif /* IMA_KEXEC */
+
+/*
+ * Restore the measurement list from the previous kernel.
+ */
+void __init ima_load_kexec_buffer(void)
+{
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size = 0;
+ int rc;
+
+ rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
+ switch (rc) {
+ case 0:
+ rc = ima_restore_measurement_list(kexec_buffer_size,
+ kexec_buffer);
+ if (rc != 0)
+ pr_err("Failed to restore the measurement list: %d\n",
+ rc);
+
+ ima_free_kexec_buffer();
+ break;
+ case -ENOTSUPP:
+ pr_debug("Restoring the measurement list not supported\n");
+ break;
+ case -ENOENT:
+ pr_debug("No measurement list to restore\n");
+ break;
+ default:
+ pr_debug("Error restoring the measurement list: %d\n", rc);
+ }
+}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index e9508d5bbfcf..5770cf691912 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -1,4 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
+ * Integrity Measurement Architecture
+ *
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
@@ -7,115 +10,206 @@
* Kylene Hall <kylene@us.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
* File: ima_main.c
* implements the IMA hooks: ima_bprm_check, ima_file_mmap,
* and ima_file_check.
*/
+
#include <linux/module.h>
#include <linux/file.h>
#include <linux/binfmts.h>
+#include <linux/kernel_read_file.h>
#include <linux/mount.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/ima.h>
+#include <linux/fs.h>
+#include <linux/iversion.h>
+#include <linux/evm.h>
+#include <linux/crash_dump.h>
#include "ima.h"
-int ima_initialized;
-
#ifdef CONFIG_IMA_APPRAISE
int ima_appraise = IMA_APPRAISE_ENFORCE;
#else
int ima_appraise;
#endif
-char *ima_hash = "sha1";
+int __ro_after_init ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+static int ima_disabled __ro_after_init;
+
+static struct notifier_block ima_lsm_policy_notifier = {
+ .notifier_call = ima_lsm_policy_change,
+};
+
+static int __init ima_setup(char *str)
+{
+ if (!is_kdump_kernel()) {
+ pr_info("Warning: ima setup option only permitted in kdump");
+ return 1;
+ }
+
+ if (strncmp(str, "off", 3) == 0)
+ ima_disabled = 1;
+ else if (strncmp(str, "on", 2) == 0)
+ ima_disabled = 0;
+ else
+		pr_err("Invalid ima setup option: \"%s\", please specify ima=on|off.", str);
+
+ return 1;
+}
+__setup("ima=", ima_setup);
+
static int __init hash_setup(char *str)
{
- if (strncmp(str, "md5", 3) == 0)
- ima_hash = "md5";
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
+
+ if (hash_setup_done)
+ return 1;
+
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0) {
+ ima_hash_algo = HASH_ALGO_SHA1;
+ } else if (strncmp(str, "md5", 3) == 0) {
+ ima_hash_algo = HASH_ALGO_MD5;
+ } else {
+ pr_err("invalid hash algorithm \"%s\" for template \"%s\"",
+ str, IMA_TEMPLATE_IMA_NAME);
+ return 1;
+ }
+ goto out;
+ }
+
+ i = match_string(hash_algo_name, HASH_ALGO__LAST, str);
+ if (i < 0) {
+ pr_err("invalid hash algorithm \"%s\"", str);
+ return 1;
+ }
+
+ ima_hash_algo = i;
+out:
+ hash_setup_done = 1;
return 1;
}
__setup("ima_hash=", hash_setup);
+enum hash_algo ima_get_current_hash_algo(void)
+{
+ return ima_hash_algo;
+}
+
+/* Prevent mmap'ing a file execute that is already mmap'ed write */
+static int mmap_violation_check(enum ima_hooks func, struct file *file,
+ char **pathbuf, const char **pathname,
+ char *filename)
+{
+ struct inode *inode;
+ int rc = 0;
+
+ if ((func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) &&
+ mapping_writably_mapped(file->f_mapping)) {
+ rc = -ETXTBSY;
+ inode = file_inode(file);
+
+ if (!*pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ *pathname = ima_d_path(&file->f_path, pathbuf,
+ filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, *pathname,
+ "mmap_file", "mmapped_writers", rc, 0);
+ }
+ return rc;
+}
+
/*
* ima_rdwr_violation_check
*
* Only invalidate the PCR for measured files:
- * - Opening a file for write when already open for read,
+ * - Opening a file for write when already open for read,
* results in a time of measure, time of use (ToMToU) error.
* - Opening a file for read when already open for write,
- * could result in a file measurement error.
+ * could result in a file measurement error.
*
*/
-static void ima_rdwr_violation_check(struct file *file)
+static void ima_rdwr_violation_check(struct file *file,
+ struct ima_iint_cache *iint,
+ int must_measure,
+ char **pathbuf,
+ const char **pathname,
+ char *filename)
{
- struct dentry *dentry = file->f_path.dentry;
struct inode *inode = file_inode(file);
fmode_t mode = file->f_mode;
- int must_measure;
bool send_tomtou = false, send_writers = false;
- char *pathbuf = NULL;
- const char *pathname;
-
- if (!S_ISREG(inode->i_mode) || !ima_initialized)
- return;
-
- mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */
if (mode & FMODE_WRITE) {
- if (atomic_read(&inode->i_readcount) && IS_IMA(inode))
- send_tomtou = true;
- goto out;
- }
+ if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+ if (!iint)
+ iint = ima_iint_find(inode);
- must_measure = ima_must_measure(inode, MAY_READ, FILE_CHECK);
- if (!must_measure)
- goto out;
+ /* IMA_MEASURE is set from reader side */
+ if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU,
+ &iint->atomic_flags))
+ send_tomtou = true;
+ }
+ } else {
+ if (must_measure)
+ set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags);
- if (atomic_read(&inode->i_writecount) > 0)
- send_writers = true;
-out:
- mutex_unlock(&inode->i_mutex);
+ /* Limit number of open_writers violations */
+ if (inode_is_open_for_write(inode) && must_measure) {
+ if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS,
+ &iint->atomic_flags))
+ send_writers = true;
+ }
+ }
if (!send_tomtou && !send_writers)
return;
- pathname = ima_d_path(&file->f_path, &pathbuf);
- if (!pathname || strlen(pathname) > IMA_EVENT_NAME_LEN_MAX)
- pathname = dentry->d_name.name;
+ *pathname = ima_d_path(&file->f_path, pathbuf, filename);
if (send_tomtou)
- ima_add_violation(inode, pathname,
+ ima_add_violation(file, *pathname, iint,
"invalid_pcr", "ToMToU");
if (send_writers)
- ima_add_violation(inode, pathname,
+ ima_add_violation(file, *pathname, iint,
"invalid_pcr", "open_writers");
- kfree(pathbuf);
}
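As a contrived, editorial illustration of the ToMToU case described above: a reader opens (and is measured) first, then a writer modifies the same file, so the earlier measurement can no longer be trusted and a violation entry is added instead. The pathname is hypothetical and assumed to match a measure rule.

/* Userspace sketch of a ToMToU sequence; path is illustrative only. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int rfd = open("/usr/local/bin/tool", O_RDONLY);	/* measured on open */
	int wfd = open("/usr/local/bin/tool", O_WRONLY);	/* ToMToU violation */

	if (wfd >= 0)
		write(wfd, "#", 1);
	close(wfd);
	close(rfd);
	return 0;
}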
-static void ima_check_last_writer(struct integrity_iint_cache *iint,
+static void ima_check_last_writer(struct ima_iint_cache *iint,
struct inode *inode, struct file *file)
{
fmode_t mode = file->f_mode;
+ bool update;
if (!(mode & FMODE_WRITE))
return;
- mutex_lock(&inode->i_mutex);
- if (atomic_read(&inode->i_writecount) == 1 &&
- iint->version != inode->i_version) {
- iint->flags &= ~IMA_DONE_MASK;
- if (iint->flags & IMA_APPRAISE)
- ima_update_xattr(iint, file);
+ mutex_lock(&iint->mutex);
+ if (atomic_read(&inode->i_writecount) == 1) {
+ struct kstat stat;
+
+ clear_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags);
+
+ update = test_and_clear_bit(IMA_UPDATE_XATTR,
+ &iint->atomic_flags);
+ if ((iint->flags & IMA_NEW_FILE) ||
+ vfs_getattr_nosec(&file->f_path, &stat,
+ STATX_CHANGE_COOKIE,
+ AT_STATX_SYNC_AS_STAT) ||
+ !(stat.result_mask & STATX_CHANGE_COOKIE) ||
+ stat.change_cookie != iint->real_inode.version) {
+ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
+ iint->measured_pcrs = 0;
+ if (update)
+ ima_update_xattr(iint, file);
+ }
}
- mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&iint->mutex);
}
/**
@@ -124,96 +218,261 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
*
* Flag files that changed, based on i_version
*/
-void ima_file_free(struct file *file)
+static void ima_file_free(struct file *file)
{
struct inode *inode = file_inode(file);
- struct integrity_iint_cache *iint;
+ struct ima_iint_cache *iint;
- if (!iint_initialized || !S_ISREG(inode->i_mode))
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
return;
- iint = integrity_iint_find(inode);
+ iint = ima_iint_find(inode);
if (!iint)
return;
ima_check_last_writer(iint, inode, file);
}
-static int process_measurement(struct file *file, const char *filename,
- int mask, int function)
+static int process_measurement(struct file *file, const struct cred *cred,
+ struct lsm_prop *prop, char *buf, loff_t size,
+ int mask, enum ima_hooks func,
+ enum kernel_read_file_id read_id)
{
- struct inode *inode = file_inode(file);
- struct integrity_iint_cache *iint;
+ struct inode *real_inode, *inode = file_inode(file);
+ struct ima_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc = NULL;
+ struct inode *metadata_inode;
char *pathbuf = NULL;
+ char filename[NAME_MAX];
const char *pathname = NULL;
- int rc = -ENOMEM, action, must_appraise, _func;
+ int rc = 0, action, must_appraise = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ struct evm_ima_xattr_data *xattr_value = NULL;
+ struct modsig *modsig = NULL;
+ int xattr_len = 0;
+ bool violation_check;
+ enum hash_algo hash_algo;
+ unsigned int allowed_algos = 0;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
return 0;
/* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
* bitmask based on the appraise/audit/measurement policy.
* Included is the appraise submask.
*/
- action = ima_get_action(inode, mask, function);
- if (!action)
+ action = ima_get_action(file_mnt_idmap(file), inode, cred, prop,
+ mask, func, &pcr, &template_desc, NULL,
+ &allowed_algos);
+ violation_check = ((func == FILE_CHECK || func == MMAP_CHECK ||
+ func == MMAP_CHECK_REQPROT) &&
+ (ima_policy_flag & IMA_MEASURE) &&
+ ((action & IMA_MEASURE) ||
+ (file->f_mode & FMODE_WRITE)));
+ if (!action && !violation_check)
return 0;
must_appraise = action & IMA_APPRAISE;
/* Is the appraise rule hook specific? */
- _func = (action & IMA_FILE_APPRAISE) ? FILE_CHECK : function;
+ if (action & IMA_FILE_APPRAISE)
+ func = FILE_CHECK;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
- iint = integrity_inode_get(inode);
- if (!iint)
+ if (action) {
+ iint = ima_inode_get(inode);
+ if (!iint)
+ rc = -ENOMEM;
+ }
+
+ if (!rc && violation_check)
+ ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+ &pathbuf, &pathname, filename);
+
+ inode_unlock(inode);
+
+ if (rc)
+ goto out;
+ if (!action)
goto out;
+ mutex_lock(&iint->mutex);
+
+ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+ /*
+ * Reset appraisal flags (action and non-action rule-specific)
+ * if ima_inode_post_setattr was called.
+ */
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_NONACTION_RULE_FLAGS);
+
+ /*
+	 * Re-evaluate the file if either the xattr has changed or the
+ * kernel has no way of detecting file change on the filesystem.
+ * (Limited to privileged mounted filesystems.)
+ */
+ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags) ||
+ ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) &&
+ !(inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) &&
+ !(action & IMA_FAIL_UNVERIFIABLE_SIGS))) {
+ iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
+ }
+
+ /*
+ * On stacked filesystems, detect and re-evaluate file data and
+ * metadata changes.
+ */
+ real_inode = d_real_inode(file_dentry(file));
+ if (real_inode != inode &&
+ (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) {
+ if (!IS_I_VERSION(real_inode) ||
+ integrity_inode_attrs_changed(&iint->real_inode,
+ real_inode)) {
+ iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
+ }
+
+ /*
+ * Reset the EVM status when metadata changed.
+ */
+ metadata_inode = d_inode(d_real(file_dentry(file),
+ D_REAL_METADATA));
+ if (evm_metadata_changed(inode, metadata_inode))
+ iint->flags &= ~(IMA_APPRAISED |
+ IMA_APPRAISED_SUBMASK);
+ }
+
/* Determine if already appraised/measured based on bitmask
* (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
* IMA_AUDIT, IMA_AUDITED)
*/
iint->flags |= action;
action &= IMA_DO_MASK;
- action &= ~((iint->flags & IMA_DONE_MASK) >> 1);
+ action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1);
+
+ /* If target pcr is already measured, unset IMA_MEASURE action */
+ if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr)))
+ action ^= IMA_MEASURE;
+
+ /* HASH sets the digital signature and update flags, nothing else */
+ if ((action & IMA_HASH) &&
+ !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) {
+ xattr_len = ima_read_xattr(file_dentry(file),
+ &xattr_value, xattr_len);
+ if ((xattr_value && xattr_len > 2) &&
+ (xattr_value->type == EVM_IMA_XATTR_DIGSIG))
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ iint->flags |= IMA_HASHED;
+ action ^= IMA_HASH;
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
/* Nothing to do, just return existing appraised status */
if (!action) {
- if (must_appraise)
- rc = ima_get_cache_status(iint, _func);
- goto out_digsig;
+ if (must_appraise) {
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
+ if (!rc)
+ rc = ima_get_cache_status(iint, func);
+ }
+ goto out_locked;
+ }
+
+ if ((action & IMA_APPRAISE_SUBMASK) ||
+ strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ /* read 'security.ima' */
+ xattr_len = ima_read_xattr(file_dentry(file),
+ &xattr_value, xattr_len);
+
+ /*
+ * Read the appended modsig if allowed by the policy, and allow
+ * an additional measurement list entry, if needed, based on the
+ * template format and whether the file was already measured.
+ */
+ if (iint->flags & IMA_MODSIG_ALLOWED) {
+ rc = ima_read_modsig(func, buf, size, &modsig);
+
+ if (!rc && ima_template_has_modsig(template_desc) &&
+ iint->flags & IMA_MEASURED)
+ action |= IMA_MEASURE;
+ }
}
- rc = ima_collect_measurement(iint, file);
- if (rc != 0)
- goto out_digsig;
+ hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
- pathname = !filename ? ima_d_path(&file->f_path, &pathbuf) : filename;
- if (!pathname)
- pathname = (const char *)file->f_dentry->d_name.name;
+ rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig);
+ if (rc != 0 && rc != -EBADF && rc != -EINVAL)
+ goto out_locked;
+
+ /* Defer measuring/appraising kernel modules to READING_MODULE */
+ if (read_id == READING_MODULE_COMPRESSED) {
+ must_appraise = 0;
+ goto out_locked;
+ }
+
+ if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
if (action & IMA_MEASURE)
- ima_store_measurement(iint, file, pathname);
- if (action & IMA_APPRAISE_SUBMASK)
- rc = ima_appraise_measurement(_func, iint, file, pathname);
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len, modsig, pcr,
+ template_desc);
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
+ rc = ima_check_blacklist(iint, modsig, pcr);
+ if (rc != -EPERM) {
+ inode_lock(inode);
+ rc = ima_appraise_measurement(func, iint, file,
+ pathname, xattr_value,
+ xattr_len, modsig);
+ inode_unlock(inode);
+ }
+ if (!rc)
+ rc = mmap_violation_check(func, file, &pathbuf,
+ &pathname, filename);
+ }
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
- kfree(pathbuf);
-out_digsig:
- if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG))
+
+ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
+ rc = 0;
+
+ /* Ensure the digest was generated using an allowed algorithm */
+ if (rc == 0 && must_appraise && allowed_algos != 0 &&
+ (allowed_algos & (1U << hash_algo)) == 0) {
rc = -EACCES;
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, file_inode(file),
+ pathname, "collect_data",
+ "denied-hash-algorithm", rc, 0);
+ }
+out_locked:
+ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
+ !(iint->flags & IMA_NEW_FILE))
+ rc = -EACCES;
+ mutex_unlock(&iint->mutex);
+ kfree(xattr_value);
+ ima_free_modsig(modsig);
out:
- mutex_unlock(&inode->i_mutex);
- if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
- return -EACCES;
+ if (pathbuf)
+ __putname(pathbuf);
+ if (must_appraise) {
+ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ if (file->f_mode & FMODE_WRITE)
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
return 0;
}
/**
* ima_file_mmap - based on policy, collect/store measurement.
* @file: pointer to the file to be measured (May be NULL)
- * @prot: contains the protection that will be applied by the kernel.
+ * @reqprot: protection requested by the application
+ * @prot: protection that will be applied by the kernel
+ * @flags: operational flags
*
* Measure files being mmapped executable based on the ima_must_measure()
* policy decision.
@@ -221,14 +480,92 @@ out:
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_file_mmap(struct file *file, unsigned long prot)
+static int ima_file_mmap(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
{
- if (file && (prot & PROT_EXEC))
- return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK);
+ struct lsm_prop prop;
+ int ret;
+
+ if (!file)
+ return 0;
+
+ security_current_getlsmprop_subj(&prop);
+
+ if (reqprot & PROT_EXEC) {
+ ret = process_measurement(file, current_cred(), &prop, NULL,
+ 0, MAY_EXEC, MMAP_CHECK_REQPROT, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (prot & PROT_EXEC)
+ return process_measurement(file, current_cred(), &prop, NULL,
+ 0, MAY_EXEC, MMAP_CHECK, 0);
+
return 0;
}
/**
+ * ima_file_mprotect - based on policy, limit mprotect change
+ * @vma: vm_area_struct protection is set to
+ * @reqprot: protection requested by the application
+ * @prot: protection that will be applied by the kernel
+ *
+ * Files can be mmap'ed read/write and later changed to execute to circumvent
+ * IMA's mmap appraisal policy rules. Due to locking issues (mmap semaphore
+ * would be taken before i_mutex), files can not be measured or appraised at
+ * this point. Eliminate this integrity gap by denying the mprotect
+ * PROT_EXEC change, if an mmap appraise policy rule exists.
+ *
+ * On mprotect change success, return 0. On failure, return -EACCES.
+ */
+static int ima_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
+ unsigned long prot)
+{
+ struct ima_template_desc *template = NULL;
+ struct file *file;
+ char filename[NAME_MAX];
+ char *pathbuf = NULL;
+ const char *pathname = NULL;
+ struct inode *inode;
+ struct lsm_prop prop;
+ int result = 0;
+ int action;
+ int pcr;
+
+ /* Is mprotect making an mmap'ed file executable? */
+ if (!(ima_policy_flag & IMA_APPRAISE) || !vma->vm_file ||
+ !(prot & PROT_EXEC) || (vma->vm_flags & VM_EXEC))
+ return 0;
+
+ security_current_getlsmprop_subj(&prop);
+ inode = file_inode(vma->vm_file);
+ action = ima_get_action(file_mnt_idmap(vma->vm_file), inode,
+ current_cred(), &prop, MAY_EXEC, MMAP_CHECK,
+ &pcr, &template, NULL, NULL);
+ action |= ima_get_action(file_mnt_idmap(vma->vm_file), inode,
+ current_cred(), &prop, MAY_EXEC,
+ MMAP_CHECK_REQPROT, &pcr, &template, NULL,
+ NULL);
+
+ /* Is the mmap'ed file in policy? */
+ if (!(action & (IMA_MEASURE | IMA_APPRAISE_SUBMASK)))
+ return 0;
+
+ if (action & IMA_APPRAISE_SUBMASK)
+ result = -EPERM;
+
+ file = vma->vm_file;
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, pathname,
+ "collect_data", "failed-mprotect", result, 0);
+ if (pathbuf)
+ __putname(pathbuf);
+
+ return result;
+}
+
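From the userspace side, the gap closed above looks like the sketch below (editorial illustration, not part of the patch): map a file without execute permission, then try to flip it executable. With an mmap appraise rule in place for the file, the mprotect() call fails instead of bypassing MMAP_CHECK appraisal. The library path is hypothetical.

/* Userspace sketch: mprotect(PROT_EXEC) denied under an mmap appraise rule. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/usr/lib/libplugin.so", O_RDONLY);	/* hypothetical path */
	void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);

	if (p == MAP_FAILED)
		return 1;
	/* Fails with EPERM when an mmap appraise policy rule exists. */
	return mprotect(p, 4096, PROT_READ | PROT_EXEC) ? 2 : 0;
}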
+/**
* ima_bprm_check - based on policy, collect/store measurement.
* @bprm: contains the linux_binprm structure
*
@@ -241,66 +578,743 @@ int ima_file_mmap(struct file *file, unsigned long prot)
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_bprm_check(struct linux_binprm *bprm)
+static int ima_bprm_check(struct linux_binprm *bprm)
+{
+ struct lsm_prop prop;
+
+ security_current_getlsmprop_subj(&prop);
+ return process_measurement(bprm->file, current_cred(),
+ &prop, NULL, 0, MAY_EXEC, BPRM_CHECK, 0);
+}
+
+/**
+ * ima_creds_check - based on policy, collect/store measurement.
+ * @bprm: contains the linux_binprm structure
+ * @file: contains the file descriptor of the binary being executed
+ *
+ * The OS protects against an executable file, already open for write,
+ * from being executed in deny_write_access() and an executable file,
+ * already open for execute, from being modified in get_write_access().
+ * So we can be certain that what we verify and measure here is actually
+ * what is being executed.
+ *
+ * The difference from ima_bprm_check() is that ima_creds_check() is invoked
+ * only after determining the final binary to be executed without interpreter,
+ * and not when searching for intermediate binaries. The reason is that since
+ * commit 56305aa9b6fab ("exec: Compute file based creds only once"), the
+ * credentials to be applied to the process are calculated only at that stage
+ * (bprm_creds_from_file security hook instead of bprm_check_security).
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+static int ima_creds_check(struct linux_binprm *bprm, const struct file *file)
{
- return process_measurement(bprm->file,
- (strcmp(bprm->filename, bprm->interp) == 0) ?
- bprm->filename : bprm->interp,
- MAY_EXEC, BPRM_CHECK);
+ struct lsm_prop prop;
+
+ security_current_getlsmprop_subj(&prop);
+ return process_measurement((struct file *)file, bprm->cred, &prop, NULL,
+ 0, MAY_EXEC, CREDS_CHECK, 0);
}
/**
- * ima_path_check - based on policy, collect/store measurement.
+ * ima_bprm_creds_for_exec - collect/store/appraise measurement.
+ * @bprm: contains the linux_binprm structure
+ *
+ * Based on the IMA policy and the execveat(2) AT_EXECVE_CHECK flag, measure
+ * and appraise the integrity of a file to be executed by script interpreters.
+ * Unlike any of the other LSM hooks where the kernel enforces file integrity,
+ * enforcing file integrity is left up to the discretion of the script
+ * interpreter (userspace).
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+static int ima_bprm_creds_for_exec(struct linux_binprm *bprm)
+{
+ /*
+ * As security_bprm_check() is called multiple times, both
+ * the script and the shebang interpreter are measured, appraised,
+ * and audited. Limit usage of this LSM hook to just measuring,
+ * appraising, and auditing the indirect script execution
+ * (e.g. ./sh example.sh).
+ */
+ if (!bprm->is_check)
+ return 0;
+
+ return ima_bprm_check(bprm);
+}
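As a rough illustration (not part of the patch) of how a script interpreter could exercise the path handled above: execveat(2) with AT_EXECVE_CHECK asks the kernel whether executing the script would be permitted, without actually executing it. The fallback flag value below is an assumption for older uapi headers; prefer the value from <linux/fcntl.h>.

/* Hypothetical interpreter-side check using AT_EXECVE_CHECK. */
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef AT_EXECVE_CHECK
#define AT_EXECVE_CHECK	0x10000		/* assumed value; prefer the uapi header */
#endif

static int script_would_be_allowed(int script_fd)
{
	char *const argv[] = { "script", NULL };
	char *const envp[] = { NULL };

	/* Returns 0 when IMA measurement/appraisal (and other LSMs) permit it. */
	return syscall(SYS_execveat, script_fd, "", argv, envp,
		       AT_EMPTY_PATH | AT_EXECVE_CHECK) == 0;
}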
+
+/**
+ * ima_file_check - based on policy, collect/store measurement.
* @file: pointer to the file to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
+ * @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND
*
* Measure files based on the ima_must_measure() policy decision.
*
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_file_check(struct file *file, int mask)
+static int ima_file_check(struct file *file, int mask)
+{
+ struct lsm_prop prop;
+
+ security_current_getlsmprop_subj(&prop);
+ return process_measurement(file, current_cred(), &prop, NULL, 0,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
+ MAY_APPEND), FILE_CHECK, 0);
+}
+
+static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf,
+ size_t buf_size)
+{
+ struct ima_iint_cache *iint = NULL, tmp_iint;
+ int rc, hash_algo;
+
+ if (ima_policy_flag) {
+ iint = ima_iint_find(inode);
+ if (iint)
+ mutex_lock(&iint->mutex);
+ }
+
+ if ((!iint || !(iint->flags & IMA_COLLECTED)) && file) {
+ if (iint)
+ mutex_unlock(&iint->mutex);
+
+ memset(&tmp_iint, 0, sizeof(tmp_iint));
+ mutex_init(&tmp_iint.mutex);
+
+ rc = ima_collect_measurement(&tmp_iint, file, NULL, 0,
+ ima_hash_algo, NULL);
+ if (rc < 0) {
+ /* ima_hash could be allocated in case of failure. */
+ if (rc != -ENOMEM)
+ kfree(tmp_iint.ima_hash);
+
+ return -EOPNOTSUPP;
+ }
+
+ iint = &tmp_iint;
+ mutex_lock(&iint->mutex);
+ }
+
+ if (!iint)
+ return -EOPNOTSUPP;
+
+ /*
+ * ima_file_hash can be called when ima_collect_measurement has still
+ * not been called, we might not always have a hash.
+ */
+ if (!iint->ima_hash || !(iint->flags & IMA_COLLECTED)) {
+ mutex_unlock(&iint->mutex);
+ return -EOPNOTSUPP;
+ }
+
+ if (buf) {
+ size_t copied_size;
+
+ copied_size = min_t(size_t, iint->ima_hash->length, buf_size);
+ memcpy(buf, iint->ima_hash->digest, copied_size);
+ }
+ hash_algo = iint->ima_hash->algo;
+ mutex_unlock(&iint->mutex);
+
+ if (iint == &tmp_iint)
+ kfree(iint->ima_hash);
+
+ return hash_algo;
+}
+
+/**
+ * ima_file_hash - return a measurement of the file
+ * @file: pointer to the file
+ * @buf: buffer in which to store the hash
+ * @buf_size: length of the buffer
+ *
+ * On success, return the hash algorithm (as defined in the enum hash_algo).
+ * If buf is not NULL, this function also outputs the hash into buf.
+ * If the hash is larger than buf_size, then only buf_size bytes will be copied.
+ * It generally just makes sense to pass a buffer capable of holding the largest
+ * possible hash: IMA_MAX_DIGEST_SIZE.
+ * The file hash returned is based on the entire file, including the appended
+ * signature.
+ *
+ * If the measurement cannot be performed, return -EOPNOTSUPP.
+ * If the parameters are incorrect, return -EINVAL.
+ */
+int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+{
+ if (!file)
+ return -EINVAL;
+
+ return __ima_inode_hash(file_inode(file), file, buf, buf_size);
+}
+EXPORT_SYMBOL_GPL(ima_file_hash);
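As a usage illustration of the helper exported above, a hypothetical in-kernel caller (editorial sketch, not part of the patch) could retrieve the measured digest like this.

/* Hypothetical in-kernel caller of ima_file_hash(); not part of IMA. */
static void example_log_file_hash(struct file *file)
{
	u8 digest[IMA_MAX_DIGEST_SIZE];
	int algo = ima_file_hash(file, digest, sizeof(digest));

	if (algo < 0)		/* -EOPNOTSUPP or -EINVAL: nothing measured */
		return;

	pr_info("measured with %s, digest[0]=%02x\n",
		hash_algo_name[algo], digest[0]);
}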
+
+/**
+ * ima_inode_hash - return the stored measurement if the inode has been hashed
+ * and is in the iint cache.
+ * @inode: pointer to the inode
+ * @buf: buffer in which to store the hash
+ * @buf_size: length of the buffer
+ *
+ * On success, return the hash algorithm (as defined in the enum hash_algo).
+ * If buf is not NULL, this function also outputs the hash into buf.
+ * If the hash is larger than buf_size, then only buf_size bytes will be copied.
+ * It generally just makes sense to pass a buffer capable of holding the largest
+ * possible hash: IMA_MAX_DIGEST_SIZE.
+ * The hash returned is based on the entire contents, including the appended
+ * signature.
+ *
+ * If IMA is disabled or if no measurement is available, return -EOPNOTSUPP.
+ * If the parameters are incorrect, return -EINVAL.
+ */
+int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
+{
+ if (!inode)
+ return -EINVAL;
+
+ return __ima_inode_hash(inode, NULL, buf, buf_size);
+}
+EXPORT_SYMBOL_GPL(ima_inode_hash);
+
+/**
+ * ima_post_create_tmpfile - mark newly created tmpfile as new
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode of the newly created tmpfile
+ *
+ * No measuring, appraising or auditing of newly created tmpfiles is needed.
+ * Skip calling process_measurement(), but indicate which newly, created
+ * tmpfiles are in policy.
+ */
+static void ima_post_create_tmpfile(struct mnt_idmap *idmap,
+ struct inode *inode)
+
+{
+ struct ima_iint_cache *iint;
+ int must_appraise;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
+ must_appraise = ima_must_appraise(idmap, inode, MAY_ACCESS,
+ FILE_CHECK);
+ if (!must_appraise)
+ return;
+
+ /* Nothing to do if we can't allocate memory */
+ iint = ima_inode_get(inode);
+ if (!iint)
+ return;
+
+ /* needed for writing the security xattrs */
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ iint->ima_file_status = INTEGRITY_PASS;
+}
+
+/**
+ * ima_post_path_mknod - mark as a new inode
+ * @idmap: idmap of the mount the inode was found from
+ * @dentry: newly created dentry
+ *
+ * Mark files created via the mknodat syscall as new, so that the
+ * file data can be written later.
+ */
+static void ima_post_path_mknod(struct mnt_idmap *idmap, struct dentry *dentry)
+{
+ struct ima_iint_cache *iint;
+ struct inode *inode = dentry->d_inode;
+ int must_appraise;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
+ must_appraise = ima_must_appraise(idmap, inode, MAY_ACCESS,
+ FILE_CHECK);
+ if (!must_appraise)
+ return;
+
+ /* Nothing to do if we can't allocate memory */
+ iint = ima_inode_get(inode);
+ if (!iint)
+ return;
+
+ /* needed for re-opening empty files */
+ iint->flags |= IMA_NEW_FILE;
+}
+
+/**
+ * ima_read_file - pre-measure/appraise hook decision based on policy
+ * @file: pointer to the file to be measured/appraised/audit
+ * @read_id: caller identifier
+ * @contents: whether a subsequent call will be made to ima_post_read_file()
+ *
+ * Permit reading a file based on policy. The policy rules are written
+ * in terms of the policy identifier. Appraising the integrity of
+ * a file requires a file descriptor.
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+static int ima_read_file(struct file *file, enum kernel_read_file_id read_id,
+ bool contents)
{
- ima_rdwr_violation_check(file);
- return process_measurement(file, NULL,
- mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
- FILE_CHECK);
+ enum ima_hooks func;
+ struct lsm_prop prop;
+
+ /*
+ * Do devices using pre-allocated memory run the risk of the
+ * firmware being accessible to the device prior to the completion
+ * of IMA's signature verification any more than when using two
+ * buffers? It may be desirable to include the buffer address
+ * in this API and walk all the dma_map_single() mappings to check.
+ */
+
+ /*
+ * There will be a call made to ima_post_read_file() with
+ * a filled buffer, so we don't need to perform an extra
+ * read early here.
+ */
+ if (contents)
+ return 0;
+
+ /* Read entire file for all partial reads. */
+ func = read_idmap[read_id] ?: FILE_CHECK;
+ security_current_getlsmprop_subj(&prop);
+ return process_measurement(file, current_cred(), &prop, NULL, 0,
+ MAY_READ, func, 0);
}
-EXPORT_SYMBOL_GPL(ima_file_check);
+
+const int read_idmap[READING_MAX_ID] = {
+ [READING_FIRMWARE] = FIRMWARE_CHECK,
+ [READING_MODULE] = MODULE_CHECK,
+ [READING_MODULE_COMPRESSED] = MODULE_CHECK,
+ [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
+ [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
+ [READING_POLICY] = POLICY_CHECK
+};
/**
- * ima_module_check - based on policy, collect/store/appraise measurement.
- * @file: pointer to the file to be measured/appraised
+ * ima_post_read_file - in memory collect/appraise/audit measurement
+ * @file: pointer to the file to be measured/appraised/audited
+ * @buf: pointer to in memory file contents
+ * @size: size of in memory file contents
+ * @read_id: caller identifier
*
- * Measure/appraise kernel modules based on policy.
+ * Measure/appraise/audit in memory file based on policy. Policy rules
+ * are written in terms of a policy identifier.
*
* On success return 0. On integrity appraisal error, assuming the file
* is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
-int ima_module_check(struct file *file)
+static int ima_post_read_file(struct file *file, char *buf, loff_t size,
+ enum kernel_read_file_id read_id)
{
- if (!file) {
-#ifndef CONFIG_MODULE_SIG_FORCE
- if ((ima_appraise & IMA_APPRAISE_MODULES) &&
- (ima_appraise & IMA_APPRAISE_ENFORCE))
+ enum ima_hooks func;
+ struct lsm_prop prop;
+
+ /* permit signed certs */
+ if (!file && read_id == READING_X509_CERTIFICATE)
+ return 0;
+
+ if (!file || !buf || size == 0) { /* should never happen */
+ if (ima_appraise & IMA_APPRAISE_ENFORCE)
+ return -EACCES;
+ return 0;
+ }
+
+ func = read_idmap[read_id] ?: FILE_CHECK;
+ security_current_getlsmprop_subj(&prop);
+ return process_measurement(file, current_cred(), &prop, buf, size,
+ MAY_READ, func, read_id);
+}
+
+/**
+ * ima_load_data - appraise decision based on policy
+ * @id: kernel load data caller identifier
+ * @contents: whether the full contents will be available in a later
+ * call to ima_post_load_data().
+ *
+ * Callers of this LSM hook cannot measure, appraise, or audit the
+ * data provided by userspace. Enforce policy rules requiring a file
+ * signature (e.g. a kexec'ed kernel image).
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+static int ima_load_data(enum kernel_load_data_id id, bool contents)
+{
+ bool ima_enforce, sig_enforce;
+
+ ima_enforce =
+ (ima_appraise & IMA_APPRAISE_ENFORCE) == IMA_APPRAISE_ENFORCE;
+
+ switch (id) {
+ case LOADING_KEXEC_IMAGE:
+ if (IS_ENABLED(CONFIG_KEXEC_SIG)
+ && arch_ima_get_secureboot()) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
+ return -EACCES;
+ }
+
+ if (ima_enforce && (ima_appraise & IMA_APPRAISE_KEXEC)) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
return -EACCES; /* INTEGRITY_UNKNOWN */
-#endif
- return 0; /* We rely on module signature checking */
+ }
+ break;
+ case LOADING_FIRMWARE:
+ if (ima_enforce && (ima_appraise & IMA_APPRAISE_FIRMWARE) && !contents) {
+ pr_err("Prevent firmware sysfs fallback loading.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_MODULE:
+ sig_enforce = is_module_sig_enforced();
+
+ if (ima_enforce && (!sig_enforce
+ && (ima_appraise & IMA_APPRAISE_MODULES))) {
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ default:
+ break;
}
- return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK);
+ return 0;
}
+/**
+ * ima_post_load_data - appraise decision based on policy
+ * @buf: pointer to the in-memory file contents
+ * @size: size of the in-memory file contents
+ * @load_id: kernel load data caller identifier
+ * @description: @load_id-specific description of contents
+ *
+ * Measure/appraise/audit the in-memory buffer based on policy. Policy rules
+ * are written in terms of a policy identifier.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+static int ima_post_load_data(char *buf, loff_t size,
+ enum kernel_load_data_id load_id,
+ char *description)
+{
+ if (load_id == LOADING_FIRMWARE) {
+ if ((ima_appraise & IMA_APPRAISE_FIRMWARE) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("Prevent firmware loading_store.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ return 0;
+ }
+
+ /*
+ * Measure the init_module syscall buffer containing the ELF image.
+ */
+ if (load_id == LOADING_MODULE)
+ ima_measure_critical_data("modules", "init_module",
+ buf, size, true, NULL, 0);
+
+ return 0;
+}
+
+/**
+ * process_buffer_measurement - Measure the buffer or the buffer data hash
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode associated with the object being measured (NULL for KEY_CHECK)
+ * @buf: pointer to the buffer that needs to be added to the log.
+ * @size: size of the buffer (in bytes).
+ * @eventname: event name to be used for the buffer entry.
+ * @func: IMA hook
+ * @pcr: PCR to extend with the measurement
+ * @func_data: func specific data, may be NULL
+ * @buf_hash: measure the buffer data hash instead of the buffer data
+ * @digest: location the calculated digest is written to, may be NULL
+ * @digest_len: length of the @digest buffer
+ *
+ * Based on policy, either the buffer data or the buffer data hash is measured.
+ *
+ * Return: 0 if the buffer has been successfully measured, 1 if the digest
+ * has been written to the passed location but not added to a measurement entry,
+ * a negative value otherwise.
+ */
+int process_buffer_measurement(struct mnt_idmap *idmap,
+ struct inode *inode, const void *buf, int size,
+ const char *eventname, enum ima_hooks func,
+ int pcr, const char *func_data,
+ bool buf_hash, u8 *digest, size_t digest_len)
+{
+ int ret = 0;
+ const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry = NULL;
+ struct ima_iint_cache iint = {};
+ struct ima_event_data event_data = {.iint = &iint,
+ .filename = eventname,
+ .buf = buf,
+ .buf_len = size};
+ struct ima_template_desc *template;
+ struct ima_max_digest_data hash;
+ struct ima_digest_data *hash_hdr = container_of(&hash.hdr,
+ struct ima_digest_data, hdr);
+ char digest_hash[IMA_MAX_DIGEST_SIZE];
+ int digest_hash_len = hash_digest_size[ima_hash_algo];
+ int violation = 0;
+ int action = 0;
+ struct lsm_prop prop;
+
+ if (digest && digest_len < digest_hash_len)
+ return -EINVAL;
+
+ if (!ima_policy_flag && !digest)
+ return -ENOENT;
+
+ template = ima_template_desc_buf();
+ if (!template) {
+ ret = -EINVAL;
+ audit_cause = "ima_template_desc_buf";
+ goto out;
+ }
+
+ /*
+ * Both LSM hooks and auxiliary based buffer measurements are
+ * based on policy. To avoid code duplication, differentiate
+ * between the LSM hooks and auxiliary buffer measurements,
+ * retrieving the policy rule information only for the LSM hook
+ * buffer measurements.
+ */
+ if (func) {
+ security_current_getlsmprop_subj(&prop);
+ action = ima_get_action(idmap, inode, current_cred(),
+ &prop, 0, func, &pcr, &template,
+ func_data, NULL);
+ if (!(action & IMA_MEASURE) && !digest)
+ return -ENOENT;
+ }
+
+ if (!pcr)
+ pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+
+ iint.ima_hash = hash_hdr;
+ iint.ima_hash->algo = ima_hash_algo;
+ iint.ima_hash->length = hash_digest_size[ima_hash_algo];
+
+ ret = ima_calc_buffer_hash(buf, size, iint.ima_hash);
+ if (ret < 0) {
+ audit_cause = "hashing_error";
+ goto out;
+ }
+
+ if (buf_hash) {
+ memcpy(digest_hash, hash_hdr->digest, digest_hash_len);
+
+ ret = ima_calc_buffer_hash(digest_hash, digest_hash_len,
+ iint.ima_hash);
+ if (ret < 0) {
+ audit_cause = "hashing_error";
+ goto out;
+ }
+
+ event_data.buf = digest_hash;
+ event_data.buf_len = digest_hash_len;
+ }
+
+ if (digest)
+ memcpy(digest, iint.ima_hash->digest, digest_hash_len);
+
+ if (!ima_policy_flag || (func && !(action & IMA_MEASURE)))
+ return 1;
+
+ ret = ima_alloc_init_template(&event_data, &entry, template);
+ if (ret < 0) {
+ audit_cause = "alloc_entry";
+ goto out;
+ }
+
+ ret = ima_store_template(entry, violation, NULL, event_data.buf, pcr);
+ if (ret < 0) {
+ audit_cause = "store_entry";
+ ima_free_template_entry(entry);
+ }
+
+out:
+ if (ret < 0)
+ integrity_audit_message(AUDIT_INTEGRITY_PCR, NULL, eventname,
+ func_measure_str(func),
+ audit_cause, ret, 0, ret);
+
+ return ret;
+}
+
+/**
+ * ima_kexec_cmdline - measure kexec cmdline boot args
+ * @kernel_fd: file descriptor of the kexec kernel being loaded
+ * @buf: pointer to buffer
+ * @size: size of buffer
+ *
+ * Buffers can only be measured, not appraised.
+ */
+void ima_kexec_cmdline(int kernel_fd, const void *buf, int size)
+{
+ if (!buf || !size)
+ return;
+
+ CLASS(fd, f)(kernel_fd);
+ if (fd_empty(f))
+ return;
+
+ process_buffer_measurement(file_mnt_idmap(fd_file(f)), file_inode(fd_file(f)),
+ buf, size, "kexec-cmdline", KEXEC_CMDLINE, 0,
+ NULL, false, NULL, 0);
+}
+
+/**
+ * ima_measure_critical_data - measure kernel integrity critical data
+ * @event_label: unique event label for grouping and limiting critical data
+ * @event_name: event name for the record in the IMA measurement list
+ * @buf: pointer to buffer data
+ * @buf_len: length of buffer data (in bytes)
+ * @hash: measure the buffer data hash instead of the buffer data
+ * @digest: location the calculated digest is written to, may be NULL
+ * @digest_len: length of the @digest buffer
+ *
+ * Measure data critical to the integrity of the kernel into the IMA log
+ * and extend the pcr. Examples of critical data could be various data
+ * structures, policies, and states stored in kernel memory that can
+ * impact the integrity of the system.
+ *
+ * Return: 0 if the buffer has been successfully measured, 1 if the digest
+ * has been written to the passed location but not added to a measurement entry,
+ * a negative value otherwise.
+ */
+int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len)
+{
+ if (!event_name || !event_label || !buf || !buf_len)
+ return -ENOPARAM;
+
+ return process_buffer_measurement(&nop_mnt_idmap, NULL, buf, buf_len,
+ event_name, CRITICAL_DATA, 0,
+ event_label, hash, digest,
+ digest_len);
+}
+EXPORT_SYMBOL_GPL(ima_measure_critical_data);
+
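A minimal usage sketch, not part of this patch; the "mysubsys" label, event
name, and state buffer are hypothetical, only the ima_measure_critical_data()
call itself follows the definition above.

	static void mysubsys_measure_state(const void *state, size_t len)
	{
		/*
		 * Record the buffer in the IMA measurement list under the
		 * "mysubsys" grouping label.  Passing hash=true instead
		 * records only the digest of the buffer, which keeps the
		 * measurement list small for large state blobs.
		 */
		ima_measure_critical_data("mysubsys", "mysubsys_state",
					  state, len, false, NULL, 0);
	}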
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+/**
+ * ima_kernel_module_request - Prevent crypto-pkcs1(rsa,*) requests
+ * @kmod_name: kernel module name
+ *
+ * Avoid a verification loop where verifying the signature of the modprobe
+ * binary requires executing modprobe itself. Since the modprobe iint->mutex
+ * is already held when the signature verification is performed, a deadlock
+ * occurs as soon as modprobe is executed within the critical region, since
+ * the same lock cannot be taken again.
+ *
+ * This happens because public_key_verify_signature(), in the case of the RSA
+ * algorithm, uses alg_name to store internal information in order to construct
+ * an algorithm on the fly, but crypto_larval_lookup() will try to use alg_name
+ * in order to load a kernel module with the same name.
+ *
+ * Since there are no real "crypto-pkcs1(rsa,*)" kernel modules,
+ * it is safe to fail such a module request from crypto_larval_lookup() and
+ * thereby avoid the verification loop.
+ *
+ * Return: Zero if it is safe to load the kernel module, -EINVAL otherwise.
+ */
+static int ima_kernel_module_request(char *kmod_name)
+{
+ if (strncmp(kmod_name, "crypto-pkcs1(rsa,", 17) == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
static int __init init_ima(void)
{
int error;
+	/* Note that turning IMA off is intentionally limited to the kdump kernel. */
+ if (ima_disabled && is_kdump_kernel()) {
+ pr_info("IMA functionality is disabled");
+ return 0;
+ }
+
+ ima_appraise_parse_cmdline();
+ ima_init_template_list();
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
+
+ if (error && strcmp(hash_algo_name[ima_hash_algo],
+ CONFIG_IMA_DEFAULT_HASH) != 0) {
+ pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
+ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
+ hash_setup_done = 0;
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
+ error = ima_init();
+ }
+
+ if (error)
+ return error;
+
+ error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier);
+ if (error)
+ pr_warn("Couldn't register LSM notifier, error %d\n", error);
+
if (!error)
- ima_initialized = 1;
+ ima_update_policy_flags();
+
return error;
}
-late_initcall(init_ima); /* Start IMA after the TPM is available */
+static struct security_hook_list ima_hooks[] __ro_after_init = {
+ LSM_HOOK_INIT(bprm_check_security, ima_bprm_check),
+ LSM_HOOK_INIT(bprm_creds_for_exec, ima_bprm_creds_for_exec),
+ LSM_HOOK_INIT(bprm_creds_from_file, ima_creds_check),
+ LSM_HOOK_INIT(file_post_open, ima_file_check),
+ LSM_HOOK_INIT(inode_post_create_tmpfile, ima_post_create_tmpfile),
+ LSM_HOOK_INIT(file_release, ima_file_free),
+ LSM_HOOK_INIT(mmap_file, ima_file_mmap),
+ LSM_HOOK_INIT(file_mprotect, ima_file_mprotect),
+ LSM_HOOK_INIT(kernel_load_data, ima_load_data),
+ LSM_HOOK_INIT(kernel_post_load_data, ima_post_load_data),
+ LSM_HOOK_INIT(kernel_read_file, ima_read_file),
+ LSM_HOOK_INIT(kernel_post_read_file, ima_post_read_file),
+ LSM_HOOK_INIT(path_post_mknod, ima_post_path_mknod),
+#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
+ LSM_HOOK_INIT(key_post_create_or_update, ima_post_key_create_or_update),
+#endif
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+ LSM_HOOK_INIT(kernel_module_request, ima_kernel_module_request),
+#endif
+ LSM_HOOK_INIT(inode_free_security_rcu, ima_inode_free_rcu),
+};
+
+static const struct lsm_id ima_lsmid = {
+ .name = "ima",
+ .id = LSM_ID_IMA,
+};
+
+static int __init init_ima_lsm(void)
+{
+ ima_iintcache_init();
+ security_add_hooks(ima_hooks, ARRAY_SIZE(ima_hooks), &ima_lsmid);
+ init_ima_appraise_lsm(&ima_lsmid);
+ return 0;
+}
+
+struct lsm_blob_sizes ima_blob_sizes __ro_after_init = {
+ .lbs_inode = sizeof(struct ima_iint_cache *),
+};
-MODULE_DESCRIPTION("Integrity Measurement Architecture");
-MODULE_LICENSE("GPL");
+DEFINE_LSM(ima) = {
+ .id = &ima_lsmid,
+ .init = init_ima_lsm,
+ .order = LSM_ORDER_LAST,
+ .blobs = &ima_blob_sizes,
+ /* Start IMA after the TPM is available */
+ .initcall_late = init_ima,
+};
diff --git a/security/integrity/ima/ima_modsig.c b/security/integrity/ima/ima_modsig.c
new file mode 100644
index 000000000000..3265d744d5ce
--- /dev/null
+++ b/security/integrity/ima/ima_modsig.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IMA support for appraising module-style appended signatures.
+ *
+ * Copyright (C) 2019 IBM Corporation
+ *
+ * Author:
+ * Thiago Jung Bauermann <bauerman@linux.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module_signature.h>
+#include <keys/asymmetric-type.h>
+#include <crypto/pkcs7.h>
+
+#include "ima.h"
+
+struct modsig {
+ struct pkcs7_message *pkcs7_msg;
+
+ enum hash_algo hash_algo;
+
+ /* This digest will go in the 'd-modsig' field of the IMA template. */
+ const u8 *digest;
+ u32 digest_size;
+
+ /*
+ * This is what will go to the measurement list if the template requires
+ * storing the signature.
+ */
+ int raw_pkcs7_len;
+ u8 raw_pkcs7[] __counted_by(raw_pkcs7_len);
+};
+
+/*
+ * ima_read_modsig - Read modsig from buf.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int ima_read_modsig(enum ima_hooks func, const void *buf, loff_t buf_len,
+ struct modsig **modsig)
+{
+ const size_t marker_len = strlen(MODULE_SIG_STRING);
+ const struct module_signature *sig;
+ struct modsig *hdr;
+ size_t sig_len;
+ const void *p;
+ int rc;
+
+ if (buf_len <= marker_len + sizeof(*sig))
+ return -ENOENT;
+
+ p = buf + buf_len - marker_len;
+ if (memcmp(p, MODULE_SIG_STRING, marker_len))
+ return -ENOENT;
+
+ buf_len -= marker_len;
+ sig = (const struct module_signature *)(p - sizeof(*sig));
+
+ rc = mod_check_sig(sig, buf_len, func_tokens[func]);
+ if (rc)
+ return rc;
+
+ sig_len = be32_to_cpu(sig->sig_len);
+ buf_len -= sig_len + sizeof(*sig);
+
+ /* Allocate sig_len additional bytes to hold the raw PKCS#7 data. */
+ hdr = kzalloc(struct_size(hdr, raw_pkcs7, sig_len), GFP_KERNEL);
+ if (!hdr)
+ return -ENOMEM;
+
+ hdr->raw_pkcs7_len = sig_len;
+ hdr->pkcs7_msg = pkcs7_parse_message(buf + buf_len, sig_len);
+ if (IS_ERR(hdr->pkcs7_msg)) {
+ rc = PTR_ERR(hdr->pkcs7_msg);
+ kfree(hdr);
+ return rc;
+ }
+
+ memcpy(hdr->raw_pkcs7, buf + buf_len, sig_len);
+
+ /* We don't know the hash algorithm yet. */
+ hdr->hash_algo = HASH_ALGO__LAST;
+
+ *modsig = hdr;
+
+ return 0;
+}
+
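For reference, a sketch (not part of this patch) of the appended-signature
layout that ima_read_modsig() walks backwards through; the boundaries follow
directly from the arithmetic above.

	/*
	 *  |<------------------------- buf_len ------------------------->|
	 *  +---------------+------------------+------------------+-------------------+
	 *  | file contents | PKCS#7 signature | struct           | MODULE_SIG_STRING |
	 *  |               | (sig->sig_len)   | module_signature | marker            |
	 *  +---------------+------------------+------------------+-------------------+
	 */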
+/**
+ * ima_collect_modsig - Calculate the file hash without the appended signature.
+ * @modsig: parsed module signature
+ * @buf: data to verify the signature on
+ * @size: data size
+ *
+ * Since the modsig is part of the file contents, the hash used in its signature
+ * isn't the same one ordinarily calculated by IMA. Therefore, the PKCS#7 code
+ * calculates a separate one for signature verification.
+ */
+void ima_collect_modsig(struct modsig *modsig, const void *buf, loff_t size)
+{
+ int rc;
+
+ /*
+ * Provide the file contents (minus the appended sig) so that the PKCS7
+ * code can calculate the file hash.
+ */
+ size -= modsig->raw_pkcs7_len + strlen(MODULE_SIG_STRING) +
+ sizeof(struct module_signature);
+ rc = pkcs7_supply_detached_data(modsig->pkcs7_msg, buf, size);
+ if (rc)
+ return;
+
+ /* Ask the PKCS7 code to calculate the file hash. */
+ rc = pkcs7_get_digest(modsig->pkcs7_msg, &modsig->digest,
+ &modsig->digest_size, &modsig->hash_algo);
+}
+
+int ima_modsig_verify(struct key *keyring, const struct modsig *modsig)
+{
+ return verify_pkcs7_message_sig(NULL, 0, modsig->pkcs7_msg, keyring,
+ VERIFYING_MODULE_SIGNATURE, NULL, NULL);
+}
+
+int ima_get_modsig_digest(const struct modsig *modsig, enum hash_algo *algo,
+ const u8 **digest, u32 *digest_size)
+{
+ *algo = modsig->hash_algo;
+ *digest = modsig->digest;
+ *digest_size = modsig->digest_size;
+
+ return 0;
+}
+
+int ima_get_raw_modsig(const struct modsig *modsig, const void **data,
+ u32 *data_len)
+{
+ *data = &modsig->raw_pkcs7;
+ *data_len = modsig->raw_pkcs7_len;
+
+ return 0;
+}
+
+void ima_free_modsig(struct modsig *modsig)
+{
+ if (!modsig)
+ return;
+
+ pkcs7_free_message(modsig->pkcs7_msg);
+ kfree(modsig);
+}
diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
new file mode 100644
index 000000000000..95cc31525c57
--- /dev/null
+++ b/security/integrity/ima/ima_mok.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Juniper Networks, Inc.
+ *
+ * Author:
+ * Petko Manolov <petko.manolov@konsulko.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <keys/system_keyring.h>
+
+
+struct key *ima_blacklist_keyring;
+
+/*
+ * Allocate the IMA blacklist keyring
+ */
+static __init int ima_mok_init(void)
+{
+ struct key_restriction *restriction;
+
+ pr_notice("Allocating IMA blacklist keyring.\n");
+
+ restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+ if (!restriction)
+ panic("Can't allocate IMA blacklist restriction.");
+
+ restriction->check = restrict_link_by_builtin_trusted;
+
+ ima_blacklist_keyring = keyring_alloc(".ima_blacklist",
+ KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_WRITE | KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_SET_KEEP,
+ restriction, NULL);
+
+ if (IS_ERR(ima_blacklist_keyring))
+ panic("Can't allocate IMA blacklist keyring.");
+ return 0;
+}
+device_initcall(ima_mok_init);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 399433ad614e..8fbd8755f5bc 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -1,32 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 IBM Corporation
* Author: Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
* ima_policy.c
- * - initialize default measure policy rules
- *
+ * - initialize default measure policy rules
*/
-#include <linux/module.h>
+
+#include <linux/init.h>
#include <linux/list.h>
+#include <linux/kernel_read_file.h>
+#include <linux/fs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/parser.h>
#include <linux/slab.h>
-#include <linux/genhd.h>
+#include <linux/rculist.h>
+#include <linux/seq_file.h>
+#include <linux/ima.h>
#include "ima.h"
/* flags definitions */
-#define IMA_FUNC 0x0001
-#define IMA_MASK 0x0002
+#define IMA_FUNC 0x0001
+#define IMA_MASK 0x0002
#define IMA_FSMAGIC 0x0004
#define IMA_UID 0x0008
#define IMA_FOWNER 0x0010
#define IMA_FSUUID 0x0020
+#define IMA_INMASK 0x0040
+#define IMA_EUID 0x0080
+#define IMA_PCR 0x0100
+#define IMA_FSNAME 0x0200
+#define IMA_KEYRINGS 0x0400
+#define IMA_LABEL 0x0800
+#define IMA_VALIDATE_ALGOS 0x1000
+#define IMA_GID 0x2000
+#define IMA_EGID 0x4000
+#define IMA_FGROUP 0x8000
+#define IMA_FS_SUBTYPE 0x10000
#define UNKNOWN 0
#define MEASURE 0x0001 /* same as IMA_MEASURE */
@@ -34,12 +46,57 @@
#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
#define DONT_APPRAISE 0x0008
#define AUDIT 0x0040
+#define DONT_AUDIT 0x0080
+#define HASH 0x0100
+#define DONT_HASH 0x0200
+
+#define INVALID_PCR(a) (((a) < 0) || \
+ (a) >= (sizeof_field(struct ima_iint_cache, measured_pcrs) * 8))
+
+int ima_policy_flag;
+static int temp_ima_appraise;
+static int build_ima_appraise __ro_after_init;
+
+atomic_t ima_setxattr_allowed_hash_algorithms;
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
};
+enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+
+enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
+
+struct ima_rule_opt_list {
+ size_t count;
+ char *items[] __counted_by(count);
+};
+
+/*
+ * These comparators are only needed within IMA, so just define them here.
+ * Ideally, this pattern will never be needed outside of IMA.
+ */
+static inline bool vfsuid_gt_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return __vfsuid_val(vfsuid) > __kuid_val(kuid);
+}
+
+static inline bool vfsgid_gt_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return __vfsgid_val(vfsgid) > __kgid_val(kgid);
+}
+
+static inline bool vfsuid_lt_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return __vfsuid_val(vfsuid) < __kuid_val(kuid);
+}
+
+static inline bool vfsgid_lt_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return __vfsgid_val(vfsgid) < __kgid_val(kgid);
+}
+
struct ima_rule_entry {
struct list_head list;
int action;
@@ -47,19 +104,41 @@ struct ima_rule_entry {
enum ima_hooks func;
int mask;
unsigned long fsmagic;
- u8 fsuuid[16];
+ uuid_t fsuuid;
kuid_t uid;
+ kgid_t gid;
kuid_t fowner;
+ kgid_t fgroup;
+ bool (*uid_op)(kuid_t cred_uid, kuid_t rule_uid); /* Handlers for operators */
+ bool (*gid_op)(kgid_t cred_gid, kgid_t rule_gid);
+ bool (*fowner_op)(vfsuid_t vfsuid, kuid_t rule_uid); /* vfsuid_eq_kuid(), vfsuid_gt_kuid(), vfsuid_lt_kuid() */
+ bool (*fgroup_op)(vfsgid_t vfsgid, kgid_t rule_gid); /* vfsgid_eq_kgid(), vfsgid_gt_kgid(), vfsgid_lt_kgid() */
+ int pcr;
+ unsigned int allowed_algos; /* bitfield of allowed hash algorithms */
struct {
void *rule; /* LSM file metadata specific */
- void *args_p; /* audit value */
+ char *args_p; /* audit value */
int type; /* audit type */
} lsm[MAX_LSM_RULES];
+ char *fsname;
+ char *fs_subtype;
+ struct ima_rule_opt_list *keyrings; /* Measure keys added to these keyrings */
+ struct ima_rule_opt_list *label; /* Measure data grouped under this label */
+ struct ima_template_desc *template;
};
/*
+ * Sanity check in case the kernel gains more hash algorithms than can
+ * fit in an unsigned int.
+ */
+static_assert(
+ 8 * sizeof(unsigned int) >= HASH_ALGO__LAST,
+ "The bitfield allowed_algos in ima_rule_entry is too small to contain all the supported hash algorithms, consider using a bigger type");
+
+/*
* Without LSM specific knowledge, the default policy can only be
- * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, .gid,
+ * .fowner, and .fgroup
*/
/*
@@ -68,299 +147,959 @@ struct ima_rule_entry {
* normal users can easily run the machine out of memory simply building
* and running executables.
*/
-static struct ima_rule_entry default_rules[] = {
- {.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
- {.action = MEASURE,.func = MMAP_CHECK,.mask = MAY_EXEC,
+static struct ima_rule_entry dont_measure_rules[] __ro_after_init = {
+ {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .func = FILE_CHECK,
+ .flags = IMA_FSMAGIC | IMA_FUNC},
+ {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
+ .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = CGROUP2_SUPER_MAGIC,
+ .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = EFIVARFS_MAGIC, .flags = IMA_FSMAGIC}
+};
+
+static struct ima_rule_entry original_measurement_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID,
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
.flags = IMA_FUNC | IMA_MASK | IMA_UID},
- {.action = MEASURE,.func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_measurement_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = POLICY_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
+ {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = EFIVARFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP2_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+#ifdef CONFIG_IMA_WRITE_POLICY
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &vfsuid_eq_kuid,
+ .flags = IMA_FOWNER},
+#else
+ /* force signature */
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &vfsuid_eq_kuid,
+ .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED},
+#endif
};
-static struct ima_rule_entry default_appraise_rules[] = {
- {.action = DONT_APPRAISE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_APPRAISE,.fsmagic = CGROUP_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = APPRAISE,.fowner = GLOBAL_ROOT_UID,.flags = IMA_FOWNER},
+static struct ima_rule_entry build_appraise_rules[] __ro_after_init = {
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
};
+static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED | IMA_MODSIG_ALLOWED |
+ IMA_CHECK_BLACKLIST},
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+};
+
+static struct ima_rule_entry critical_data_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = CRITICAL_DATA, .flags = IMA_FUNC},
+};
+
+/* An array of architecture specific rules */
+static struct ima_rule_entry *arch_policy_entry __ro_after_init;
+
static LIST_HEAD(ima_default_rules);
static LIST_HEAD(ima_policy_rules);
-static struct list_head *ima_rules;
+static LIST_HEAD(ima_temp_rules);
+static struct list_head __rcu *ima_rules = (struct list_head __rcu *)(&ima_default_rules);
-static DEFINE_MUTEX(ima_rules_mutex);
+static int ima_policy __initdata;
-static bool ima_use_tcb __initdata;
static int __init default_measure_policy_setup(char *str)
{
- ima_use_tcb = 1;
+ if (ima_policy)
+ return 1;
+
+ ima_policy = ORIGINAL_TCB;
return 1;
}
__setup("ima_tcb", default_measure_policy_setup);
static bool ima_use_appraise_tcb __initdata;
+static bool ima_use_secure_boot __initdata;
+static bool ima_use_critical_data __initdata;
+static bool ima_fail_unverifiable_sigs __ro_after_init;
+static int __init policy_setup(char *str)
+{
+ char *p;
+
+ while ((p = strsep(&str, " |\n")) != NULL) {
+ if (*p == ' ')
+ continue;
+ if ((strcmp(p, "tcb") == 0) && !ima_policy)
+ ima_policy = DEFAULT_TCB;
+ else if (strcmp(p, "appraise_tcb") == 0)
+ ima_use_appraise_tcb = true;
+ else if (strcmp(p, "secure_boot") == 0)
+ ima_use_secure_boot = true;
+ else if (strcmp(p, "critical_data") == 0)
+ ima_use_critical_data = true;
+ else if (strcmp(p, "fail_securely") == 0)
+ ima_fail_unverifiable_sigs = true;
+ else
+ pr_err("policy \"%s\" not found", p);
+ }
+
+ return 1;
+}
+__setup("ima_policy=", policy_setup);
+
static int __init default_appraise_policy_setup(char *str)
{
- ima_use_appraise_tcb = 1;
+ ima_use_appraise_tcb = true;
return 1;
}
__setup("ima_appraise_tcb", default_appraise_policy_setup);
-/*
- * Although the IMA policy does not change, the LSM policy can be
- * reloaded, leaving the IMA LSM based rules referring to the old,
- * stale LSM policy.
- *
- * Update the IMA LSM based rules to reflect the reloaded LSM policy.
- * We assume the rules still exist; and BUG_ON() if they don't.
+static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
+{
+ struct ima_rule_opt_list *opt_list;
+ size_t count = 0;
+ char *src_copy;
+ char *cur, *next;
+ size_t i;
+
+ src_copy = match_strdup(src);
+ if (!src_copy)
+ return ERR_PTR(-ENOMEM);
+
+ next = src_copy;
+ while ((cur = strsep(&next, "|"))) {
+ /* Don't accept an empty list item */
+ if (!(*cur)) {
+ kfree(src_copy);
+ return ERR_PTR(-EINVAL);
+ }
+ count++;
+ }
+
+ /* Don't accept an empty list */
+ if (!count) {
+ kfree(src_copy);
+ return ERR_PTR(-EINVAL);
+ }
+
+ opt_list = kzalloc(struct_size(opt_list, items, count), GFP_KERNEL);
+ if (!opt_list) {
+ kfree(src_copy);
+ return ERR_PTR(-ENOMEM);
+ }
+ opt_list->count = count;
+
+ /*
+ * strsep() has already replaced all instances of '|' with '\0',
+ * leaving a byte sequence of NUL-terminated strings. Reference each
+ * string with the array of items.
+ *
+ * IMPORTANT: Ownership of the allocated buffer is transferred from
+ * src_copy to the first element in the items array. To free the
+ * buffer, kfree() must only be called on the first element of the
+ * array.
+ */
+ for (i = 0, cur = src_copy; i < count; i++) {
+ opt_list->items[i] = cur;
+ cur = strchr(cur, '\0') + 1;
+ }
+
+ return opt_list;
+}
+
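A worked example, not part of this patch, of how ima_alloc_rule_opt_list()
splits a '|'-separated rule option; the keyring names are illustrative.

	/*
	 * For a rule option such as  keyrings=.builtin_trusted_keys|.ima
	 * the match_strdup() copy is cut up in place by strsep(), yielding:
	 *
	 *	opt_list->count    = 2
	 *	opt_list->items[0] = ".builtin_trusted_keys"
	 *	opt_list->items[1] = ".ima"
	 *
	 * items[0] also owns the underlying buffer, which is why
	 * ima_free_rule_opt_list() only kfree()s the first element.
	 */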
+static void ima_free_rule_opt_list(struct ima_rule_opt_list *opt_list)
+{
+ if (!opt_list)
+ return;
+
+ if (opt_list->count) {
+ kfree(opt_list->items[0]);
+ opt_list->count = 0;
+ }
+
+ kfree(opt_list);
+}
+
+static void ima_lsm_free_rule(struct ima_rule_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ ima_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry->lsm[i].args_p);
+ }
+}
+
+static void ima_free_rule(struct ima_rule_entry *entry)
+{
+ if (!entry)
+ return;
+
+ /*
+ * entry->template->fields may be allocated in ima_parse_rule() but that
+ * reference is owned by the corresponding ima_template_desc element in
+ * the defined_templates list and cannot be freed here
+ */
+ kfree(entry->fsname);
+ kfree(entry->fs_subtype);
+ ima_free_rule_opt_list(entry->keyrings);
+ ima_lsm_free_rule(entry);
+ kfree(entry);
+}
+
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
+ gfp_t gfp)
+{
+ struct ima_rule_entry *nentry;
+ int i;
+
+ /*
+ * Immutable elements are copied over as pointers and data; only
+ * lsm rules can change
+ */
+ nentry = kmemdup(entry, sizeof(*nentry), gfp);
+ if (!nentry)
+ return NULL;
+
+ memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm));
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].args_p)
+ continue;
+
+ nentry->lsm[i].type = entry->lsm[i].type;
+ nentry->lsm[i].args_p = entry->lsm[i].args_p;
+
+ ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
+ nentry->lsm[i].args_p,
+ &nentry->lsm[i].rule,
+ gfp);
+ if (!nentry->lsm[i].rule)
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ nentry->lsm[i].args_p);
+ }
+ return nentry;
+}
+
+static int ima_lsm_update_rule(struct ima_rule_entry *entry)
+{
+ int i;
+ struct ima_rule_entry *nentry;
+
+ nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
+ if (!nentry)
+ return -ENOMEM;
+
+ list_replace_rcu(&entry->list, &nentry->list);
+ synchronize_rcu();
+ /*
+ * ima_lsm_copy_rule() shallow copied all references, except for the
+ * LSM references, from entry to nentry so we only want to free the LSM
+ * references and the entry itself. All other memory references will now
+ * be owned by nentry.
+ */
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ ima_filter_rule_free(entry->lsm[i].rule);
+ kfree(entry);
+
+ return 0;
+}
+
+static bool ima_rule_contains_lsm_cond(struct ima_rule_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ if (entry->lsm[i].args_p)
+ return true;
+
+ return false;
+}
+
+/*
+ * The LSM policy can be reloaded, leaving the IMA LSM based rules referring
+ * to the old, stale LSM policy. Update the IMA LSM based rules to reflect
+ * the reloaded LSM policy.
*/
static void ima_lsm_update_rules(void)
{
- struct ima_rule_entry *entry, *tmp;
+ struct ima_rule_entry *entry, *e;
int result;
- int i;
- mutex_lock(&ima_rules_mutex);
- list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
- for (i = 0; i < MAX_LSM_RULES; i++) {
- if (!entry->lsm[i].rule)
- continue;
- result = security_filter_rule_init(entry->lsm[i].type,
- Audit_equal,
- entry->lsm[i].args_p,
- &entry->lsm[i].rule);
- BUG_ON(!entry->lsm[i].rule);
+ list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
+ if (!ima_rule_contains_lsm_cond(entry))
+ continue;
+
+ result = ima_lsm_update_rule(entry);
+ if (result) {
+ pr_err("lsm rule update error %d\n", result);
+ return;
+ }
+ }
+}
+
+int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
+ void *lsm_data)
+{
+ if (event != LSM_POLICY_CHANGE)
+ return NOTIFY_DONE;
+
+ ima_lsm_update_rules();
+ return NOTIFY_OK;
+}
+
+/**
+ * ima_match_rule_data - determine whether func_data matches the policy rule
+ * @rule: a pointer to a rule
+ * @func_data: data to match against the measure rule data
+ * @cred: a pointer to a credentials structure for user validation
+ *
+ * Returns true if @func_data matches one of the entries in the rule, false otherwise.
+ */
+static bool ima_match_rule_data(struct ima_rule_entry *rule,
+ const char *func_data,
+ const struct cred *cred)
+{
+ const struct ima_rule_opt_list *opt_list = NULL;
+ bool matched = false;
+ size_t i;
+
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+
+ switch (rule->func) {
+ case KEY_CHECK:
+ if (!rule->keyrings)
+ return true;
+
+ opt_list = rule->keyrings;
+ break;
+ case CRITICAL_DATA:
+ if (!rule->label)
+ return true;
+
+ opt_list = rule->label;
+ break;
+ default:
+ return false;
+ }
+
+ if (!func_data)
+ return false;
+
+ for (i = 0; i < opt_list->count; i++) {
+ if (!strcmp(opt_list->items[i], func_data)) {
+ matched = true;
+ break;
}
}
- mutex_unlock(&ima_rules_mutex);
+
+ return matched;
}
/**
- * ima_match_rules - determine whether an inode matches the measure rule.
+ * ima_match_rules - determine whether an inode matches the policy rule.
* @rule: a pointer to a rule
+ * @idmap: idmap of the mount the inode was found from
* @inode: a pointer to an inode
+ * @cred: a pointer to a credentials structure for user validation
+ * @prop: LSM properties of the task to be validated
* @func: LIM hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @func_data: func specific data, may be NULL
*
* Returns true on rule match, false on failure.
*/
static bool ima_match_rules(struct ima_rule_entry *rule,
- struct inode *inode, enum ima_hooks func, int mask)
+ struct mnt_idmap *idmap,
+ struct inode *inode, const struct cred *cred,
+ struct lsm_prop *prop, enum ima_hooks func, int mask,
+ const char *func_data)
{
- struct task_struct *tsk = current;
- const struct cred *cred = current_cred();
int i;
+ bool result = false;
+ struct ima_rule_entry *lsm_rule = rule;
+ bool rule_reinitialized = false;
+
+ if ((rule->flags & IMA_FUNC) &&
+ (rule->func != func && func != POST_SETATTR))
+ return false;
+
+ switch (func) {
+ case KEY_CHECK:
+ case CRITICAL_DATA:
+ return ((rule->func == func) &&
+ ima_match_rule_data(rule, func_data, cred));
+ default:
+ break;
+ }
- if ((rule->flags & IMA_FUNC) && rule->func != func)
+ if ((rule->flags & IMA_MASK) &&
+ (rule->mask != mask && func != POST_SETATTR))
return false;
- if ((rule->flags & IMA_MASK) && rule->mask != mask)
+ if ((rule->flags & IMA_INMASK) &&
+ (!(rule->mask & mask) && func != POST_SETATTR))
return false;
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
+ if ((rule->flags & IMA_FSNAME)
+ && strcmp(rule->fsname, inode->i_sb->s_type->name))
+ return false;
+ if (rule->flags & IMA_FS_SUBTYPE) {
+ if (!inode->i_sb->s_subtype)
+ return false;
+ if (strcmp(rule->fs_subtype, inode->i_sb->s_subtype))
+ return false;
+ }
if ((rule->flags & IMA_FSUUID) &&
- memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+ !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
+ return false;
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+ if (rule->flags & IMA_EUID) {
+ if (has_capability_noaudit(current, CAP_SETUID)) {
+ if (!rule->uid_op(cred->euid, rule->uid)
+ && !rule->uid_op(cred->suid, rule->uid)
+ && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+ } else if (!rule->uid_op(cred->euid, rule->uid))
+ return false;
+ }
+ if ((rule->flags & IMA_GID) && !rule->gid_op(cred->gid, rule->gid))
return false;
- if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+ if (rule->flags & IMA_EGID) {
+ if (has_capability_noaudit(current, CAP_SETGID)) {
+ if (!rule->gid_op(cred->egid, rule->gid)
+ && !rule->gid_op(cred->sgid, rule->gid)
+ && !rule->gid_op(cred->gid, rule->gid))
+ return false;
+ } else if (!rule->gid_op(cred->egid, rule->gid))
+ return false;
+ }
+ if ((rule->flags & IMA_FOWNER) &&
+ !rule->fowner_op(i_uid_into_vfsuid(idmap, inode),
+ rule->fowner))
return false;
- if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
+ if ((rule->flags & IMA_FGROUP) &&
+ !rule->fgroup_op(i_gid_into_vfsgid(idmap, inode),
+ rule->fgroup))
return false;
for (i = 0; i < MAX_LSM_RULES; i++) {
int rc = 0;
- u32 osid, sid;
- int retried = 0;
+ struct lsm_prop inode_prop = { };
+
+ if (!lsm_rule->lsm[i].rule) {
+ if (!lsm_rule->lsm[i].args_p)
+ continue;
+ else
+ return false;
+ }
- if (!rule->lsm[i].rule)
- continue;
retry:
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
case LSM_OBJ_TYPE:
- security_inode_getsecid(inode, &osid);
- rc = security_filter_rule_match(osid,
- rule->lsm[i].type,
- Audit_equal,
- rule->lsm[i].rule,
- NULL);
+ security_inode_getlsmprop(inode, &inode_prop);
+ rc = ima_filter_rule_match(&inode_prop,
+ lsm_rule->lsm[i].type,
+ Audit_equal,
+ lsm_rule->lsm[i].rule);
break;
case LSM_SUBJ_USER:
case LSM_SUBJ_ROLE:
case LSM_SUBJ_TYPE:
- security_task_getsecid(tsk, &sid);
- rc = security_filter_rule_match(sid,
- rule->lsm[i].type,
- Audit_equal,
- rule->lsm[i].rule,
- NULL);
+ rc = ima_filter_rule_match(prop, lsm_rule->lsm[i].type,
+ Audit_equal,
+ lsm_rule->lsm[i].rule);
+ break;
default:
break;
}
- if ((rc < 0) && (!retried)) {
- retried = 1;
- ima_lsm_update_rules();
- goto retry;
- }
- if (!rc)
- return false;
+
+ if (rc == -ESTALE && !rule_reinitialized) {
+ lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
+ if (lsm_rule) {
+ rule_reinitialized = true;
+ goto retry;
+ }
+ }
+ if (rc <= 0) {
+ result = false;
+ goto out;
+ }
}
- return true;
+ result = true;
+
+out:
+ if (rule_reinitialized) {
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ ima_filter_rule_free(lsm_rule->lsm[i].rule);
+ kfree(lsm_rule);
+ }
+ return result;
}
/*
* In addition to knowing that we need to appraise the file in general,
* we need to differentiate between calling hooks, for hook specific rules.
*/
-static int get_subaction(struct ima_rule_entry *rule, int func)
+static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
{
if (!(rule->flags & IMA_FUNC))
return IMA_FILE_APPRAISE;
- switch(func) {
+ switch (func) {
case MMAP_CHECK:
+ case MMAP_CHECK_REQPROT:
return IMA_MMAP_APPRAISE;
case BPRM_CHECK:
return IMA_BPRM_APPRAISE;
- case MODULE_CHECK:
- return IMA_MODULE_APPRAISE;
+ case CREDS_CHECK:
+ return IMA_CREDS_APPRAISE;
case FILE_CHECK:
- default:
+ case POST_SETATTR:
return IMA_FILE_APPRAISE;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ return IMA_READ_APPRAISE;
}
}
/**
* ima_match_policy - decision based on LSM and other conditions
+ * @idmap: idmap of the mount the inode was found from
* @inode: pointer to an inode for which the policy decision is being made
+ * @cred: pointer to a credentials structure for which the policy decision is
+ * being made
+ * @prop: LSM properties of the task to be validated
* @func: IMA hook identifier
* @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
+ * @pcr: set the pcr to extend
+ * @template_desc: the template that should be used for this rule
+ * @func_data: func specific data, may be NULL
+ * @allowed_algos: allowlist of hash algorithms for the IMA xattr
*
* Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
* conditions.
*
- * (There is no need for locking when walking the policy list,
- * as elements in the list are never deleted, nor does the list
- * change.)
+ * Since the IMA policy may be updated multiple times we need to lock the
+ * list when walking it. Reads are many orders of magnitude more numerous
+ * than writes, so ima_match_policy() is a classic RCU candidate.
*/
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
- int flags)
+int ima_match_policy(struct mnt_idmap *idmap, struct inode *inode,
+ const struct cred *cred, struct lsm_prop *prop,
+ enum ima_hooks func, int mask, int flags, int *pcr,
+ struct ima_template_desc **template_desc,
+ const char *func_data, unsigned int *allowed_algos)
{
struct ima_rule_entry *entry;
int action = 0, actmask = flags | (flags << 1);
+ struct list_head *ima_rules_tmp;
- list_for_each_entry(entry, ima_rules, list) {
+ if (template_desc && !*template_desc)
+ *template_desc = ima_template_desc_current();
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
if (!(entry->action & actmask))
continue;
- if (!ima_match_rules(entry, inode, func, mask))
+ if (!ima_match_rules(entry, idmap, inode, cred, prop,
+ func, mask, func_data))
continue;
- action |= entry->flags & IMA_ACTION_FLAGS;
+ action |= entry->flags & IMA_NONACTION_FLAGS;
action |= entry->action & IMA_DO_MASK;
- if (entry->action & IMA_APPRAISE)
+ if (entry->action & IMA_APPRAISE) {
action |= get_subaction(entry, func);
+ action &= ~IMA_HASH;
+ if (ima_fail_unverifiable_sigs)
+ action |= IMA_FAIL_UNVERIFIABLE_SIGS;
+
+ if (allowed_algos &&
+ entry->flags & IMA_VALIDATE_ALGOS)
+ *allowed_algos = entry->allowed_algos;
+ }
if (entry->action & IMA_DO_MASK)
actmask &= ~(entry->action | entry->action << 1);
else
actmask &= ~(entry->action | entry->action >> 1);
+ if ((pcr) && (entry->flags & IMA_PCR))
+ *pcr = entry->pcr;
+
+ if (template_desc && entry->template)
+ *template_desc = entry->template;
+
if (!actmask)
break;
}
+ rcu_read_unlock();
return action;
}
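A worked example, not part of this patch, of the actmask bookkeeping above.
It uses only the action flag values defined at the top of this file, where
each DONT_* bit is the corresponding "do" bit shifted left by one.

	/*
	 * With flags = IMA_MEASURE | IMA_APPRAISE (0x0001 | 0x0004):
	 *
	 *	actmask = flags | (flags << 1) = 0x000f
	 *
	 * A matching DONT_MEASURE rule (action = 0x0002, a "dont" action and
	 * therefore outside IMA_DO_MASK) clears both itself and its "do"
	 * counterpart:
	 *
	 *	actmask &= ~(0x0002 | 0x0002 >> 1)  ->  actmask = 0x000c
	 *
	 * so only the appraise decision remains open; the walk stops as soon
	 * as actmask reaches zero.
	 */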
/**
+ * ima_update_policy_flags() - Update global IMA variables
+ *
+ * Update ima_policy_flag and ima_setxattr_allowed_hash_algorithms
+ * based on the currently loaded policy.
+ *
+ * With ima_policy_flag, the decision to short circuit out of a function
+ * or not call the function in the first place can be made earlier.
+ *
+ * With ima_setxattr_allowed_hash_algorithms, the policy can restrict the
+ * set of hash algorithms accepted when updating the security.ima xattr of
+ * a file.
+ *
+ * Context: called after a policy update and at system initialization.
+ */
+void ima_update_policy_flags(void)
+{
+ struct ima_rule_entry *entry;
+ int new_policy_flag = 0;
+ struct list_head *ima_rules_tmp;
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ /*
+ * SETXATTR_CHECK rules do not implement a full policy check
+ * because rule checking would probably have an important
+ * performance impact on setxattr(). As a consequence, only one
+ * SETXATTR_CHECK can be active at a given time.
+		 * To preserve that property, atomic_cmpxchg() is used. Either:
+ * - the atomic was non-zero: a setxattr hash policy is
+ * already enforced, we do nothing
+ * - the atomic was zero: no setxattr policy was set, enable
+ * the setxattr hash policy
+ */
+ if (entry->func == SETXATTR_CHECK) {
+ atomic_cmpxchg(&ima_setxattr_allowed_hash_algorithms,
+ 0, entry->allowed_algos);
+ /* SETXATTR_CHECK doesn't impact ima_policy_flag */
+ continue;
+ }
+
+ if (entry->action & IMA_DO_MASK)
+ new_policy_flag |= entry->action;
+ }
+ rcu_read_unlock();
+
+ ima_appraise |= (build_ima_appraise | temp_ima_appraise);
+ if (!ima_appraise)
+ new_policy_flag &= ~IMA_APPRAISE;
+
+ ima_policy_flag = new_policy_flag;
+}
+
+static int ima_appraise_flag(enum ima_hooks func)
+{
+ if (func == MODULE_CHECK)
+ return IMA_APPRAISE_MODULES;
+ else if (func == FIRMWARE_CHECK)
+ return IMA_APPRAISE_FIRMWARE;
+ else if (func == POLICY_CHECK)
+ return IMA_APPRAISE_POLICY;
+ else if (func == KEXEC_KERNEL_CHECK)
+ return IMA_APPRAISE_KEXEC;
+ return 0;
+}
+
+static void add_rules(struct ima_rule_entry *entries, int count,
+ enum policy_rule_list policy_rule)
+{
+ int i = 0;
+
+ for (i = 0; i < count; i++) {
+ struct ima_rule_entry *entry;
+
+ if (policy_rule & IMA_DEFAULT_POLICY)
+ list_add_tail(&entries[i].list, &ima_default_rules);
+
+ if (policy_rule & IMA_CUSTOM_POLICY) {
+ entry = kmemdup(&entries[i], sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry)
+ continue;
+
+ list_add_tail(&entry->list, &ima_policy_rules);
+ }
+ if (entries[i].action == APPRAISE) {
+ if (entries != build_appraise_rules)
+ temp_ima_appraise |=
+ ima_appraise_flag(entries[i].func);
+ else
+ build_ima_appraise |=
+ ima_appraise_flag(entries[i].func);
+ }
+ }
+}
+
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry);
+
+static int __init ima_init_arch_policy(void)
+{
+ const char * const *arch_rules;
+ const char * const *rules;
+ int arch_entries = 0;
+ int i = 0;
+
+ arch_rules = arch_get_ima_policy();
+ if (!arch_rules)
+ return arch_entries;
+
+ /* Get number of rules */
+ for (rules = arch_rules; *rules != NULL; rules++)
+ arch_entries++;
+
+ arch_policy_entry = kcalloc(arch_entries + 1,
+ sizeof(*arch_policy_entry), GFP_KERNEL);
+ if (!arch_policy_entry)
+ return 0;
+
+ /* Convert each policy string rules to struct ima_rule_entry format */
+ for (rules = arch_rules, i = 0; *rules != NULL; rules++) {
+ char rule[255];
+ int result;
+
+ result = strscpy(rule, *rules, sizeof(rule));
+
+ INIT_LIST_HEAD(&arch_policy_entry[i].list);
+ result = ima_parse_rule(rule, &arch_policy_entry[i]);
+ if (result) {
+ pr_warn("Skipping unknown architecture policy rule: %s\n",
+ rule);
+ memset(&arch_policy_entry[i], 0,
+ sizeof(*arch_policy_entry));
+ continue;
+ }
+ i++;
+ }
+ return i;
+}
+
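A sketch, not part of this patch, of the kind of NULL-terminated string array
arch_get_ima_policy() may hand to ima_init_arch_policy(); the exact rules are
architecture and configuration specific, so treat these as illustrative only.

	static const char * const example_arch_rules[] = {
		"appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig",
		"measure func=KEXEC_KERNEL_CHECK",
		"appraise func=MODULE_CHECK appraise_type=imasig",
		"measure func=MODULE_CHECK",
		NULL	/* terminator expected by the counting loop above */
	};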
+/**
* ima_init_policy - initialize the default measure rules.
*
- * ima_rules points to either the ima_default_rules or the
- * the new ima_policy_rules.
+ * ima_rules points to either the ima_default_rules or the new ima_policy_rules.
*/
void __init ima_init_policy(void)
{
- int i, measure_entries, appraise_entries;
-
- /* if !ima_use_tcb set entries = 0 so we load NO default rules */
- measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
- appraise_entries = ima_use_appraise_tcb ?
- ARRAY_SIZE(default_appraise_rules) : 0;
-
- for (i = 0; i < measure_entries + appraise_entries; i++) {
- if (i < measure_entries)
- list_add_tail(&default_rules[i].list,
- &ima_default_rules);
- else {
- int j = i - measure_entries;
-
- list_add_tail(&default_appraise_rules[j].list,
- &ima_default_rules);
- }
+ int build_appraise_entries, arch_entries;
+
+ /* if !ima_policy, we load NO default rules */
+ if (ima_policy)
+ add_rules(dont_measure_rules, ARRAY_SIZE(dont_measure_rules),
+ IMA_DEFAULT_POLICY);
+
+ switch (ima_policy) {
+ case ORIGINAL_TCB:
+ add_rules(original_measurement_rules,
+ ARRAY_SIZE(original_measurement_rules),
+ IMA_DEFAULT_POLICY);
+ break;
+ case DEFAULT_TCB:
+ add_rules(default_measurement_rules,
+ ARRAY_SIZE(default_measurement_rules),
+ IMA_DEFAULT_POLICY);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Based on runtime secure boot flags, insert arch specific measurement
+ * and appraise rules requiring file signatures for both the initial
+ * and custom policies, prior to other appraise rules.
+ * (Highest priority)
+ */
+ arch_entries = ima_init_arch_policy();
+ if (!arch_entries)
+ pr_info("No architecture policies found\n");
+ else
+ add_rules(arch_policy_entry, arch_entries,
+ IMA_DEFAULT_POLICY | IMA_CUSTOM_POLICY);
+
+ /*
+ * Insert the builtin "secure_boot" policy rules requiring file
+ * signatures, prior to other appraise rules.
+ */
+ if (ima_use_secure_boot)
+ add_rules(secure_boot_rules, ARRAY_SIZE(secure_boot_rules),
+ IMA_DEFAULT_POLICY);
+
+ /*
+ * Insert the build time appraise rules requiring file signatures
+ * for both the initial and custom policies, prior to other appraise
+	 * rules. As the secure boot rules include all of the build time
+ * rules, include either one or the other set of rules, but not both.
+ */
+ build_appraise_entries = ARRAY_SIZE(build_appraise_rules);
+ if (build_appraise_entries) {
+ if (ima_use_secure_boot)
+ add_rules(build_appraise_rules, build_appraise_entries,
+ IMA_CUSTOM_POLICY);
+ else
+ add_rules(build_appraise_rules, build_appraise_entries,
+ IMA_DEFAULT_POLICY | IMA_CUSTOM_POLICY);
}
- ima_rules = &ima_default_rules;
+ if (ima_use_appraise_tcb)
+ add_rules(default_appraise_rules,
+ ARRAY_SIZE(default_appraise_rules),
+ IMA_DEFAULT_POLICY);
+
+ if (ima_use_critical_data)
+ add_rules(critical_data_rules,
+ ARRAY_SIZE(critical_data_rules),
+ IMA_DEFAULT_POLICY);
+
+ atomic_set(&ima_setxattr_allowed_hash_algorithms, 0);
+
+ ima_update_policy_flags();
+}
+
+/* Make sure we have a valid policy, at least containing some rules. */
+int ima_check_policy(void)
+{
+ if (list_empty(&ima_temp_rules))
+ return -EINVAL;
+ return 0;
}
/**
* ima_update_policy - update default_rules with new measure rules
*
* Called on file .release to update the default rules with a complete new
- * policy. Once updated, the policy is locked, no additional rules can be
- * added to the policy.
+ * policy. What we do here is splice ima_temp_rules onto the tail of
+ * ima_policy_rules so they form a single queue. The policy may be updated
+ * multiple times, and this is the RCU updater.
+ *
+ * Policy rules are never deleted so ima_policy_flag gets zeroed only once when
+ * we switch from the default policy to user defined.
*/
void ima_update_policy(void)
{
- const char *op = "policy_update";
- const char *cause = "already exists";
- int result = 1;
- int audit_info = 0;
+ struct list_head *policy = &ima_policy_rules;
+
+ list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu);
+
+ if (ima_rules != (struct list_head __rcu *)policy) {
+ ima_policy_flag = 0;
- if (ima_rules == &ima_default_rules) {
- ima_rules = &ima_policy_rules;
- cause = "complete";
- result = 0;
+ rcu_assign_pointer(ima_rules, policy);
+ /*
+ * IMA architecture specific policy rules are specified
+		 * as strings and converted to an array of struct ima_rule_entry
+ * on boot. After loading a custom policy, free the
+ * architecture specific rules stored as an array.
+ */
+ kfree(arch_policy_entry);
}
- integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
- NULL, op, cause, result, audit_info);
+ ima_update_policy_flags();
+
+ /* Custom IMA policy has been loaded */
+ ima_process_queued_keys();
}
-enum {
- Opt_err = -1,
- Opt_measure = 1, Opt_dont_measure,
+/* Keep the enumeration in sync with the policy_tokens! */
+enum policy_opt {
+ Opt_measure, Opt_dont_measure,
Opt_appraise, Opt_dont_appraise,
- Opt_audit,
+ Opt_audit, Opt_dont_audit, Opt_hash, Opt_dont_hash,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
- Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
- Opt_appraise_type, Opt_fsuuid
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname, Opt_fs_subtype, Opt_fsuuid,
+ Opt_uid_eq, Opt_euid_eq, Opt_gid_eq, Opt_egid_eq,
+ Opt_fowner_eq, Opt_fgroup_eq,
+ Opt_uid_gt, Opt_euid_gt, Opt_gid_gt, Opt_egid_gt,
+ Opt_fowner_gt, Opt_fgroup_gt,
+ Opt_uid_lt, Opt_euid_lt, Opt_gid_lt, Opt_egid_lt,
+ Opt_fowner_lt, Opt_fgroup_lt,
+ Opt_digest_type,
+ Opt_appraise_type, Opt_appraise_flag, Opt_appraise_algos,
+ Opt_permit_directio, Opt_pcr, Opt_template, Opt_keyrings,
+ Opt_label, Opt_err
};
-static match_table_t policy_tokens = {
+static const match_table_t policy_tokens = {
{Opt_measure, "measure"},
{Opt_dont_measure, "dont_measure"},
{Opt_appraise, "appraise"},
{Opt_dont_appraise, "dont_appraise"},
{Opt_audit, "audit"},
+ {Opt_dont_audit, "dont_audit"},
+ {Opt_hash, "hash"},
+ {Opt_dont_hash, "dont_hash"},
{Opt_obj_user, "obj_user=%s"},
{Opt_obj_role, "obj_role=%s"},
{Opt_obj_type, "obj_type=%s"},
@@ -370,10 +1109,36 @@ static match_table_t policy_tokens = {
{Opt_func, "func=%s"},
{Opt_mask, "mask=%s"},
{Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsname, "fsname=%s"},
+ {Opt_fs_subtype, "fs_subtype=%s"},
{Opt_fsuuid, "fsuuid=%s"},
- {Opt_uid, "uid=%s"},
- {Opt_fowner, "fowner=%s"},
+ {Opt_uid_eq, "uid=%s"},
+ {Opt_euid_eq, "euid=%s"},
+ {Opt_gid_eq, "gid=%s"},
+ {Opt_egid_eq, "egid=%s"},
+ {Opt_fowner_eq, "fowner=%s"},
+ {Opt_fgroup_eq, "fgroup=%s"},
+ {Opt_uid_gt, "uid>%s"},
+ {Opt_euid_gt, "euid>%s"},
+ {Opt_gid_gt, "gid>%s"},
+ {Opt_egid_gt, "egid>%s"},
+ {Opt_fowner_gt, "fowner>%s"},
+ {Opt_fgroup_gt, "fgroup>%s"},
+ {Opt_uid_lt, "uid<%s"},
+ {Opt_euid_lt, "euid<%s"},
+ {Opt_gid_lt, "gid<%s"},
+ {Opt_egid_lt, "egid<%s"},
+ {Opt_fowner_lt, "fowner<%s"},
+ {Opt_fgroup_lt, "fgroup<%s"},
+ {Opt_digest_type, "digest_type=%s"},
{Opt_appraise_type, "appraise_type=%s"},
+ {Opt_appraise_flag, "appraise_flag=%s"},
+ {Opt_appraise_algos, "appraise_algos=%s"},
+ {Opt_permit_directio, "permit_directio"},
+ {Opt_pcr, "pcr=%s"},
+ {Opt_template, "template=%s"},
+ {Opt_keyrings, "keyrings=%s"},
+ {Opt_label, "label=%s"},
{Opt_err, NULL}
};
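To illustrate the grammar these tokens define, a custom policy is normally loaded by writing one rule per line to the securityfs policy interface. The following userspace sketch is an illustration only: it assumes securityfs is mounted at /sys/kernel/security, and the rule strings are hypothetical combinations of tokens from the table above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical rules built from the tokens in the table above */
	const char *rules[] = {
		"measure func=BPRM_CHECK mask=MAY_EXEC uid=0\n",
		"appraise func=MODULE_CHECK appraise_type=imasig\n",
		"measure func=KEY_CHECK keyrings=.ima|.builtin_trusted_keys\n",
	};
	size_t i;
	int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		if (write(fd, rules[i], strlen(rules[i])) < 0)
			perror("write");
	close(fd);
	return 0;
}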
@@ -390,42 +1155,300 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
return -ENOMEM;
entry->lsm[lsm_rule].type = audit_type;
- result = security_filter_rule_init(entry->lsm[lsm_rule].type,
- Audit_equal,
- entry->lsm[lsm_rule].args_p,
- &entry->lsm[lsm_rule].rule);
+ result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
+ entry->lsm[lsm_rule].args_p,
+ &entry->lsm[lsm_rule].rule,
+ GFP_KERNEL);
if (!entry->lsm[lsm_rule].rule) {
- kfree(entry->lsm[lsm_rule].args_p);
- return -EINVAL;
+ pr_warn("rule for LSM \'%s\' is undefined\n",
+ entry->lsm[lsm_rule].args_p);
+
+ if (ima_rules == (struct list_head __rcu *)(&ima_default_rules)) {
+ kfree(entry->lsm[lsm_rule].args_p);
+ entry->lsm[lsm_rule].args_p = NULL;
+ result = -EINVAL;
+ } else
+ result = 0;
}
return result;
}
+static void ima_log_string_op(struct audit_buffer *ab, char *key, char *value,
+ enum policy_opt rule_operator)
+{
+ if (!ab)
+ return;
+
+ switch (rule_operator) {
+ case Opt_uid_gt:
+ case Opt_euid_gt:
+ case Opt_gid_gt:
+ case Opt_egid_gt:
+ case Opt_fowner_gt:
+ case Opt_fgroup_gt:
+ audit_log_format(ab, "%s>", key);
+ break;
+ case Opt_uid_lt:
+ case Opt_euid_lt:
+ case Opt_gid_lt:
+ case Opt_egid_lt:
+ case Opt_fowner_lt:
+ case Opt_fgroup_lt:
+ audit_log_format(ab, "%s<", key);
+ break;
+ default:
+ audit_log_format(ab, "%s=", key);
+ }
+ audit_log_format(ab, "%s ", value);
+}
static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
{
- audit_log_format(ab, "%s=", key);
- audit_log_untrustedstring(ab, value);
- audit_log_format(ab, " ");
+ ima_log_string_op(ab, key, value, Opt_err);
+}
+
+/*
+ * Validating the appended signature included in the measurement list requires
+ * the file hash calculated without the appended signature (i.e., the 'd-modsig'
+ * field). Therefore, notify the user if they have the 'modsig' field but not
+ * the 'd-modsig' field in the template.
+ */
+static void check_template_modsig(const struct ima_template_desc *template)
+{
+#define MSG "template with 'modsig' field also needs 'd-modsig' field\n"
+ bool has_modsig, has_dmodsig;
+ static bool checked;
+ int i;
+
+ /* We only need to notify the user once. */
+ if (checked)
+ return;
+
+ has_modsig = has_dmodsig = false;
+ for (i = 0; i < template->num_fields; i++) {
+ if (!strcmp(template->fields[i]->field_id, "modsig"))
+ has_modsig = true;
+ else if (!strcmp(template->fields[i]->field_id, "d-modsig"))
+ has_dmodsig = true;
+ }
+
+ if (has_modsig && !has_dmodsig)
+ pr_notice(MSG);
+
+ checked = true;
+#undef MSG
+}
+
+/*
+ * Warn if the template does not contain the given field.
+ */
+static void check_template_field(const struct ima_template_desc *template,
+ const char *field, const char *msg)
+{
+ int i;
+
+ for (i = 0; i < template->num_fields; i++)
+ if (!strcmp(template->fields[i]->field_id, field))
+ return;
+
+ pr_notice_once("%s", msg);
+}
+
+static bool ima_validate_rule(struct ima_rule_entry *entry)
+{
+ /* Ensure that the action is set and is compatible with the flags */
+ if (entry->action == UNKNOWN)
+ return false;
+
+ if (entry->action != MEASURE && entry->flags & IMA_PCR)
+ return false;
+
+ if (entry->action != APPRAISE &&
+ entry->flags & (IMA_DIGSIG_REQUIRED | IMA_MODSIG_ALLOWED |
+ IMA_CHECK_BLACKLIST | IMA_VALIDATE_ALGOS))
+ return false;
+
+ /*
+ * The IMA_FUNC bit must be set if and only if there's a valid hook
+ * function specified. Enforcing this property allows the NONE case
+ * below to validate a rule without an explicit hook function.
+ */
+ if (((entry->flags & IMA_FUNC) && entry->func == NONE) ||
+ (!(entry->flags & IMA_FUNC) && entry->func != NONE))
+ return false;
+
+ /*
+ * Ensure that the hook function is compatible with the other
+ * components of the rule
+ */
+ switch (entry->func) {
+ case NONE:
+ case FILE_CHECK:
+ case MMAP_CHECK:
+ case MMAP_CHECK_REQPROT:
+ case BPRM_CHECK:
+ case CREDS_CHECK:
+ case POST_SETATTR:
+ case FIRMWARE_CHECK:
+ case POLICY_CHECK:
+ if (entry->flags & ~(IMA_FUNC | IMA_MASK | IMA_FSMAGIC |
+ IMA_UID | IMA_FOWNER | IMA_FSUUID |
+ IMA_INMASK | IMA_EUID | IMA_PCR |
+ IMA_FSNAME | IMA_FS_SUBTYPE |
+ IMA_GID | IMA_EGID |
+ IMA_FGROUP | IMA_DIGSIG_REQUIRED |
+ IMA_PERMIT_DIRECTIO | IMA_VALIDATE_ALGOS |
+ IMA_CHECK_BLACKLIST | IMA_VERITY_REQUIRED))
+ return false;
+
+ break;
+ case MODULE_CHECK:
+ case KEXEC_KERNEL_CHECK:
+ case KEXEC_INITRAMFS_CHECK:
+ if (entry->flags & ~(IMA_FUNC | IMA_MASK | IMA_FSMAGIC |
+ IMA_UID | IMA_FOWNER | IMA_FSUUID |
+ IMA_INMASK | IMA_EUID | IMA_PCR |
+ IMA_FSNAME | IMA_FS_SUBTYPE |
+ IMA_GID | IMA_EGID |
+ IMA_FGROUP | IMA_DIGSIG_REQUIRED |
+ IMA_PERMIT_DIRECTIO | IMA_MODSIG_ALLOWED |
+ IMA_CHECK_BLACKLIST | IMA_VALIDATE_ALGOS))
+ return false;
+
+ break;
+ case KEXEC_CMDLINE:
+ if (entry->action & ~(MEASURE | DONT_MEASURE))
+ return false;
+
+ if (entry->flags & ~(IMA_FUNC | IMA_FSMAGIC | IMA_UID |
+ IMA_FOWNER | IMA_FSUUID | IMA_EUID |
+ IMA_PCR | IMA_FSNAME | IMA_FS_SUBTYPE |
+ IMA_GID | IMA_EGID |
+ IMA_FGROUP))
+ return false;
+
+ break;
+ case KEY_CHECK:
+ if (entry->action & ~(MEASURE | DONT_MEASURE))
+ return false;
+
+ if (entry->flags & ~(IMA_FUNC | IMA_UID | IMA_GID | IMA_PCR |
+ IMA_KEYRINGS))
+ return false;
+
+ if (ima_rule_contains_lsm_cond(entry))
+ return false;
+
+ break;
+ case CRITICAL_DATA:
+ if (entry->action & ~(MEASURE | DONT_MEASURE))
+ return false;
+
+ if (entry->flags & ~(IMA_FUNC | IMA_UID | IMA_GID | IMA_PCR |
+ IMA_LABEL))
+ return false;
+
+ if (ima_rule_contains_lsm_cond(entry))
+ return false;
+
+ break;
+ case SETXATTR_CHECK:
+ /* any action other than APPRAISE is unsupported */
+ if (entry->action != APPRAISE)
+ return false;
+
+ /* SETXATTR_CHECK requires an appraise_algos parameter */
+ if (!(entry->flags & IMA_VALIDATE_ALGOS))
+ return false;
+
+ /*
+ * full policies are not supported, they would have too
+ * much of a performance impact
+ */
+ if (entry->flags & ~(IMA_FUNC | IMA_VALIDATE_ALGOS))
+ return false;
+
+ break;
+ default:
+ return false;
+ }
+
+ /* Ensure that combinations of flags are compatible with each other */
+ if (entry->flags & IMA_CHECK_BLACKLIST &&
+ !(entry->flags & IMA_DIGSIG_REQUIRED))
+ return false;
+
+ /*
+ * Unlike for regular IMA 'appraise' policy rules where security.ima
+ * xattr may contain either a file hash or signature, the security.ima
+ * xattr for fsverity must contain a file signature (sigv3). Ensure
+ * that 'appraise' rules for fsverity require file signatures by
+ * checking the IMA_DIGSIG_REQUIRED flag is set.
+ */
+ if (entry->action == APPRAISE &&
+ (entry->flags & IMA_VERITY_REQUIRED) &&
+ !(entry->flags & IMA_DIGSIG_REQUIRED))
+ return false;
+
+ return true;
+}
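To make the per-hook constraints above concrete, here is a comment-style sketch of how a few hypothetical rule strings (not taken from this patch) would fare once parsed and handed to ima_validate_rule():

/*
 * Hypothetical examples of ima_validate_rule() results:
 *
 *   "measure func=KEY_CHECK keyrings=.ima pcr=11"
 *	accepted: KEY_CHECK permits MEASURE with the IMA_KEYRINGS and
 *	IMA_PCR flags.
 *   "appraise func=KEY_CHECK"
 *	rejected: KEY_CHECK only supports MEASURE/DONT_MEASURE actions.
 *   "appraise pcr=12"
 *	rejected: IMA_PCR is only valid for MEASURE rules.
 *   "appraise func=BPRM_CHECK digest_type=verity"
 *	rejected: a verity digest also requires appraise_type=sigv3,
 *	i.e. IMA_DIGSIG_REQUIRED must be set alongside IMA_VERITY_REQUIRED.
 */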
+
+static unsigned int ima_parse_appraise_algos(char *arg)
+{
+ unsigned int res = 0;
+ int idx;
+ char *token;
+
+ while ((token = strsep(&arg, ",")) != NULL) {
+ idx = match_string(hash_algo_name, HASH_ALGO__LAST, token);
+
+ if (idx < 0) {
+ pr_err("unknown hash algorithm \"%s\"",
+ token);
+ return 0;
+ }
+
+ if (!crypto_has_alg(hash_algo_name[idx], 0, 0)) {
+ pr_err("unavailable hash algorithm \"%s\", check your kernel configuration",
+ token);
+ return 0;
+ }
+
+ /* Add the hash algorithm to the 'allowed' bitfield */
+ res |= (1U << idx);
+ }
+
+ return res;
}
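For example, a rule carrying appraise_algos=sha256,sha512 yields an allowed_algos bitfield with the HASH_ALGO_SHA256 and HASH_ALGO_SHA512 bits set. A standalone sketch of that mapping follows; the enum values are assumed to mirror <uapi/linux/hash_info.h> and are for illustration only.

#include <stdio.h>

/* illustrative values assumed to mirror enum hash_algo in <uapi/linux/hash_info.h> */
enum { HASH_ALGO_SHA256 = 4, HASH_ALGO_SHA512 = 6 };

int main(void)
{
	/* what ima_parse_appraise_algos("sha256,sha512") would return */
	unsigned int allowed = (1U << HASH_ALGO_SHA256) | (1U << HASH_ALGO_SHA512);

	printf("allowed_algos = 0x%x\n", allowed);	/* prints 0x50 */
	return 0;
}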
static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
{
struct audit_buffer *ab;
+ char *from;
char *p;
+ bool eid_token; /* either euid or egid */
+ struct ima_template_desc *template_desc;
int result = 0;
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
+ ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_POLICY_RULE);
entry->uid = INVALID_UID;
+ entry->gid = INVALID_GID;
entry->fowner = INVALID_UID;
+ entry->fgroup = INVALID_GID;
+ entry->uid_op = &uid_eq;
+ entry->gid_op = &gid_eq;
+ entry->fowner_op = &vfsuid_eq_kuid;
+ entry->fgroup_op = &vfsgid_eq_kgid;
entry->action = UNKNOWN;
while ((p = strsep(&rule, " \t")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
unsigned long lnum;
- if (result < 0)
+ if (result < 0 || *p == '#') /* ignore suffixed comment */
break;
if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
continue;
@@ -471,6 +1494,30 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->action = AUDIT;
break;
+ case Opt_dont_audit:
+ ima_log_string(ab, "action", "dont_audit");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_AUDIT;
+ break;
+ case Opt_hash:
+ ima_log_string(ab, "action", "hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = HASH;
+ break;
+ case Opt_dont_hash:
+ ima_log_string(ab, "action", "dont_hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_HASH;
+ break;
case Opt_func:
ima_log_string(ab, "func", args[0].from);
@@ -484,11 +1531,34 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->func = FILE_CHECK;
else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
entry->func = MODULE_CHECK;
+ else if (strcmp(args[0].from, "FIRMWARE_CHECK") == 0)
+ entry->func = FIRMWARE_CHECK;
else if ((strcmp(args[0].from, "FILE_MMAP") == 0)
|| (strcmp(args[0].from, "MMAP_CHECK") == 0))
entry->func = MMAP_CHECK;
+ else if ((strcmp(args[0].from, "MMAP_CHECK_REQPROT") == 0))
+ entry->func = MMAP_CHECK_REQPROT;
else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
entry->func = BPRM_CHECK;
+ else if (strcmp(args[0].from, "CREDS_CHECK") == 0)
+ entry->func = CREDS_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_KERNEL_CHECK") ==
+ 0)
+ entry->func = KEXEC_KERNEL_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_INITRAMFS_CHECK")
+ == 0)
+ entry->func = KEXEC_INITRAMFS_CHECK;
+ else if (strcmp(args[0].from, "POLICY_CHECK") == 0)
+ entry->func = POLICY_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0)
+ entry->func = KEXEC_CMDLINE;
+ else if (IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) &&
+ strcmp(args[0].from, "KEY_CHECK") == 0)
+ entry->func = KEY_CHECK;
+ else if (strcmp(args[0].from, "CRITICAL_DATA") == 0)
+ entry->func = CRITICAL_DATA;
+ else if (strcmp(args[0].from, "SETXATTR_CHECK") == 0)
+ entry->func = SETXATTR_CHECK;
else
result = -EINVAL;
if (!result)
@@ -500,18 +1570,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
if (entry->mask)
result = -EINVAL;
- if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
+ from = args[0].from;
+ if (*from == '^')
+ from++;
+
+ if ((strcmp(from, "MAY_EXEC")) == 0)
entry->mask = MAY_EXEC;
- else if (strcmp(args[0].from, "MAY_WRITE") == 0)
+ else if (strcmp(from, "MAY_WRITE") == 0)
entry->mask = MAY_WRITE;
- else if (strcmp(args[0].from, "MAY_READ") == 0)
+ else if (strcmp(from, "MAY_READ") == 0)
entry->mask = MAY_READ;
- else if (strcmp(args[0].from, "MAY_APPEND") == 0)
+ else if (strcmp(from, "MAY_APPEND") == 0)
entry->mask = MAY_APPEND;
else
result = -EINVAL;
if (!result)
- entry->flags |= IMA_MASK;
+ entry->flags |= (*args[0].from == '^')
+ ? IMA_INMASK : IMA_MASK;
break;
case Opt_fsmagic:
ima_log_string(ab, "fsmagic", args[0].from);
@@ -521,59 +1596,206 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
break;
}
- result = strict_strtoul(args[0].from, 16,
- &entry->fsmagic);
+ result = kstrtoul(args[0].from, 16, &entry->fsmagic);
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
+ case Opt_fsname:
+ ima_log_string(ab, "fsname", args[0].from);
+
+ entry->fsname = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->fsname) {
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_FSNAME;
+ break;
+ case Opt_fs_subtype:
+ ima_log_string(ab, "fs_subtype", args[0].from);
+
+ if (entry->fs_subtype) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->fs_subtype = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->fs_subtype) {
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_FS_SUBTYPE;
+ break;
+ case Opt_keyrings:
+ ima_log_string(ab, "keyrings", args[0].from);
+
+ if (!IS_ENABLED(CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS) ||
+ entry->keyrings) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->keyrings = ima_alloc_rule_opt_list(args);
+ if (IS_ERR(entry->keyrings)) {
+ result = PTR_ERR(entry->keyrings);
+ entry->keyrings = NULL;
+ break;
+ }
+
+ entry->flags |= IMA_KEYRINGS;
+ break;
+ case Opt_label:
+ ima_log_string(ab, "label", args[0].from);
+
+ if (entry->label) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->label = ima_alloc_rule_opt_list(args);
+ if (IS_ERR(entry->label)) {
+ result = PTR_ERR(entry->label);
+ entry->label = NULL;
+ break;
+ }
+
+ entry->flags |= IMA_LABEL;
+ break;
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
- if (memchr_inv(entry->fsuuid, 0x00,
- sizeof(entry->fsuuid))) {
+ if (!uuid_is_null(&entry->fsuuid)) {
result = -EINVAL;
break;
}
- result = blk_part_pack_uuid(args[0].from,
- entry->fsuuid);
+ result = uuid_parse(args[0].from, &entry->fsuuid);
if (!result)
entry->flags |= IMA_FSUUID;
break;
- case Opt_uid:
- ima_log_string(ab, "uid", args[0].from);
+ case Opt_uid_gt:
+ case Opt_euid_gt:
+ entry->uid_op = &uid_gt;
+ fallthrough;
+ case Opt_uid_lt:
+ case Opt_euid_lt:
+ if ((token == Opt_uid_lt) || (token == Opt_euid_lt))
+ entry->uid_op = &uid_lt;
+ fallthrough;
+ case Opt_uid_eq:
+ case Opt_euid_eq:
+ eid_token = (token == Opt_euid_eq) ||
+ (token == Opt_euid_gt) ||
+ (token == Opt_euid_lt);
+
+ ima_log_string_op(ab, eid_token ? "euid" : "uid",
+ args[0].from, token);
if (uid_valid(entry->uid)) {
result = -EINVAL;
break;
}
- result = strict_strtoul(args[0].from, 10, &lnum);
+ result = kstrtoul(args[0].from, 10, &lnum);
if (!result) {
- entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
- if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
+ entry->uid = make_kuid(current_user_ns(),
+ (uid_t) lnum);
+ if (!uid_valid(entry->uid) ||
+ (uid_t)lnum != lnum)
result = -EINVAL;
else
- entry->flags |= IMA_UID;
+ entry->flags |= eid_token
+ ? IMA_EUID : IMA_UID;
}
break;
- case Opt_fowner:
- ima_log_string(ab, "fowner", args[0].from);
+ case Opt_gid_gt:
+ case Opt_egid_gt:
+ entry->gid_op = &gid_gt;
+ fallthrough;
+ case Opt_gid_lt:
+ case Opt_egid_lt:
+ if ((token == Opt_gid_lt) || (token == Opt_egid_lt))
+ entry->gid_op = &gid_lt;
+ fallthrough;
+ case Opt_gid_eq:
+ case Opt_egid_eq:
+ eid_token = (token == Opt_egid_eq) ||
+ (token == Opt_egid_gt) ||
+ (token == Opt_egid_lt);
+
+ ima_log_string_op(ab, eid_token ? "egid" : "gid",
+ args[0].from, token);
+
+ if (gid_valid(entry->gid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->gid = make_kgid(current_user_ns(),
+ (gid_t)lnum);
+ if (!gid_valid(entry->gid) ||
+ (((gid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= eid_token
+ ? IMA_EGID : IMA_GID;
+ }
+ break;
+ case Opt_fowner_gt:
+ entry->fowner_op = &vfsuid_gt_kuid;
+ fallthrough;
+ case Opt_fowner_lt:
+ if (token == Opt_fowner_lt)
+ entry->fowner_op = &vfsuid_lt_kuid;
+ fallthrough;
+ case Opt_fowner_eq:
+ ima_log_string_op(ab, "fowner", args[0].from, token);
if (uid_valid(entry->fowner)) {
result = -EINVAL;
break;
}
- result = strict_strtoul(args[0].from, 10, &lnum);
+ result = kstrtoul(args[0].from, 10, &lnum);
if (!result) {
- entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum);
- if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum))
+ entry->fowner = make_kuid(current_user_ns(),
+ (uid_t)lnum);
+ if (!uid_valid(entry->fowner) ||
+ (((uid_t)lnum) != lnum))
result = -EINVAL;
else
entry->flags |= IMA_FOWNER;
}
break;
+ case Opt_fgroup_gt:
+ entry->fgroup_op = &vfsgid_gt_kgid;
+ fallthrough;
+ case Opt_fgroup_lt:
+ if (token == Opt_fgroup_lt)
+ entry->fgroup_op = &vfsgid_lt_kgid;
+ fallthrough;
+ case Opt_fgroup_eq:
+ ima_log_string_op(ab, "fgroup", args[0].from, token);
+
+ if (gid_valid(entry->fgroup)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->fgroup = make_kgid(current_user_ns(),
+ (gid_t)lnum);
+ if (!gid_valid(entry->fgroup) ||
+ (((gid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_FGROUP;
+ }
+ break;
case Opt_obj_user:
ima_log_string(ab, "obj_user", args[0].from);
result = ima_lsm_rule_init(entry, args,
@@ -610,17 +1832,96 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
LSM_SUBJ_TYPE,
AUDIT_SUBJ_TYPE);
break;
+ case Opt_digest_type:
+ ima_log_string(ab, "digest_type", args[0].from);
+ if (entry->flags & IMA_DIGSIG_REQUIRED)
+ result = -EINVAL;
+ else if ((strcmp(args[0].from, "verity")) == 0)
+ entry->flags |= IMA_VERITY_REQUIRED;
+ else
+ result = -EINVAL;
+ break;
case Opt_appraise_type:
- if (entry->action != APPRAISE) {
+ ima_log_string(ab, "appraise_type", args[0].from);
+
+ if ((strcmp(args[0].from, "imasig")) == 0) {
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_DIGSIG_REQUIRED | IMA_CHECK_BLACKLIST;
+ } else if (strcmp(args[0].from, "sigv3") == 0) {
+ /* Only fsverity supports sigv3 for now */
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ entry->flags |= IMA_DIGSIG_REQUIRED | IMA_CHECK_BLACKLIST;
+ else
+ result = -EINVAL;
+ } else if (IS_ENABLED(CONFIG_IMA_APPRAISE_MODSIG) &&
+ strcmp(args[0].from, "imasig|modsig") == 0) {
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_DIGSIG_REQUIRED |
+ IMA_MODSIG_ALLOWED | IMA_CHECK_BLACKLIST;
+ } else {
+ result = -EINVAL;
+ }
+ break;
+ case Opt_appraise_flag:
+ ima_log_string(ab, "appraise_flag", args[0].from);
+ break;
+ case Opt_appraise_algos:
+ ima_log_string(ab, "appraise_algos", args[0].from);
+
+ if (entry->allowed_algos) {
result = -EINVAL;
break;
}
- ima_log_string(ab, "appraise_type", args[0].from);
- if ((strcmp(args[0].from, "imasig")) == 0)
- entry->flags |= IMA_DIGSIG_REQUIRED;
+ entry->allowed_algos =
+ ima_parse_appraise_algos(args[0].from);
+ /* invalid or empty list of algorithms */
+ if (!entry->allowed_algos) {
+ result = -EINVAL;
+ break;
+ }
+
+ entry->flags |= IMA_VALIDATE_ALGOS;
+
+ break;
+ case Opt_permit_directio:
+ entry->flags |= IMA_PERMIT_DIRECTIO;
+ break;
+ case Opt_pcr:
+ ima_log_string(ab, "pcr", args[0].from);
+
+ result = kstrtoint(args[0].from, 10, &entry->pcr);
+ if (result || INVALID_PCR(entry->pcr))
+ result = -EINVAL;
else
+ entry->flags |= IMA_PCR;
+
+ break;
+ case Opt_template:
+ ima_log_string(ab, "template", args[0].from);
+ if (entry->action != MEASURE) {
+ result = -EINVAL;
+ break;
+ }
+ template_desc = lookup_template_desc(args[0].from);
+ if (!template_desc || entry->template) {
result = -EINVAL;
+ break;
+ }
+
+ /*
+ * template_desc_init_fields() does nothing if
+ * the template is already initialised, so
+ * it's safe to do this unconditionally
+ */
+ template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ entry->template = template_desc;
break;
case Opt_err:
ima_log_string(ab, "UNKNOWN", p);
@@ -628,10 +1929,26 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
break;
}
}
- if (!result && (entry->action == UNKNOWN))
+ if (!result && !ima_validate_rule(entry))
result = -EINVAL;
- else if (entry->func == MODULE_CHECK)
- ima_appraise |= IMA_APPRAISE_MODULES;
+ else if (entry->action == APPRAISE)
+ temp_ima_appraise |= ima_appraise_flag(entry->func);
+
+ if (!result && entry->flags & IMA_MODSIG_ALLOWED) {
+ template_desc = entry->template ? entry->template :
+ ima_template_desc_current();
+ check_template_modsig(template_desc);
+ }
+
+ /* d-ngv2 template field recommended for unsigned fs-verity digests */
+ if (!result && entry->action == MEASURE &&
+ entry->flags & IMA_VERITY_REQUIRED) {
+ template_desc = entry->template ? entry->template :
+ ima_template_desc_current();
+ check_template_field(template_desc, "d-ngv2",
+ "verity rules should include d-ngv2");
+ }
+
audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
@@ -639,26 +1956,25 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
/**
* ima_parse_add_rule - add a rule to ima_policy_rules
- * @rule - ima measurement policy rule
+ * @rule: ima measurement policy rule
*
- * Uses a mutex to protect the policy list from multiple concurrent writers.
+ * Avoid locking by allowing just one writer at a time in ima_write_policy()
* Returns the length of the rule parsed, an error code on failure
*/
ssize_t ima_parse_add_rule(char *rule)
{
- const char *op = "update_policy";
+ static const char op[] = "update_policy";
char *p;
struct ima_rule_entry *entry;
ssize_t result, len;
int audit_info = 0;
- /* Prevent installed policy from changing */
- if (ima_rules != &ima_default_rules) {
- integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
- NULL, op, "already exists",
- -EACCES, audit_info);
- return -EACCES;
- }
+ p = strsep(&rule, "\n");
+ len = strlen(p) + 1;
+ p += strspn(p, " \t");
+
+ if (*p == '#' || *p == '\0')
+ return len;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
@@ -669,43 +1985,400 @@ ssize_t ima_parse_add_rule(char *rule)
INIT_LIST_HEAD(&entry->list);
- p = strsep(&rule, "\n");
- len = strlen(p) + 1;
-
- if (*p == '#') {
- kfree(entry);
- return len;
- }
-
result = ima_parse_rule(p, entry);
if (result) {
- kfree(entry);
+ ima_free_rule(entry);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
- NULL, op, "invalid policy", result,
+ NULL, op, "invalid-policy", result,
audit_info);
return result;
}
- mutex_lock(&ima_rules_mutex);
- list_add_tail(&entry->list, &ima_policy_rules);
- mutex_unlock(&ima_rules_mutex);
+ list_add_tail(&entry->list, &ima_temp_rules);
return len;
}
-/* ima_delete_rules called to cleanup invalid policy */
+/**
+ * ima_delete_rules() - called to cleanup invalid in-flight policy.
+ *
+ * We don't need locking as we operate on the temp list, which is
+ * different from the active one. There is also only one user of
+ * ima_delete_rules() at a time.
+ */
void ima_delete_rules(void)
{
struct ima_rule_entry *entry, *tmp;
+
+ temp_ima_appraise = 0;
+ list_for_each_entry_safe(entry, tmp, &ima_temp_rules, list) {
+ list_del(&entry->list);
+ ima_free_rule(entry);
+ }
+}
+
+#define __ima_hook_stringify(func, str) (#func),
+
+const char *const func_tokens[] = {
+ __ima_hooks(__ima_hook_stringify)
+};
+
+#ifdef CONFIG_IMA_READ_POLICY
+enum {
+ mask_exec = 0, mask_write, mask_read, mask_append
+};
+
+static const char *const mask_tokens[] = {
+ "^MAY_EXEC",
+ "^MAY_WRITE",
+ "^MAY_READ",
+ "^MAY_APPEND"
+};
+
+void *ima_policy_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t l = *pos;
+ struct ima_rule_entry *entry;
+ struct list_head *ima_rules_tmp;
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (!l--) {
+ rcu_read_unlock();
+ return entry;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ima_rule_entry *entry = v;
+
+ rcu_read_lock();
+ entry = list_entry_rcu(entry->list.next, struct ima_rule_entry, list);
+ rcu_read_unlock();
+ (*pos)++;
+
+ return (&entry->list == &ima_default_rules ||
+ &entry->list == &ima_policy_rules) ? NULL : entry;
+}
+
+void ima_policy_stop(struct seq_file *m, void *v)
+{
+}
+
+#define pt(token) policy_tokens[token].pattern
+#define mt(token) mask_tokens[token]
+
+/*
+ * policy_func_show - display the ima_hooks policy rule
+ */
+static void policy_func_show(struct seq_file *m, enum ima_hooks func)
+{
+ if (func > 0 && func < MAX_CHECK)
+ seq_printf(m, "func=%s ", func_tokens[func]);
+ else
+ seq_printf(m, "func=%d ", func);
+}
+
+static void ima_show_rule_opt_list(struct seq_file *m,
+ const struct ima_rule_opt_list *opt_list)
+{
+ size_t i;
+
+ for (i = 0; i < opt_list->count; i++)
+ seq_printf(m, "%s%s", i ? "|" : "", opt_list->items[i]);
+}
+
+static void ima_policy_show_appraise_algos(struct seq_file *m,
+ unsigned int allowed_hashes)
+{
+ int idx, list_size = 0;
+
+ for (idx = 0; idx < HASH_ALGO__LAST; idx++) {
+ if (!(allowed_hashes & (1U << idx)))
+ continue;
+
+ /* only add commas if the list contains multiple entries */
+ if (list_size++)
+ seq_puts(m, ",");
+
+ seq_puts(m, hash_algo_name[idx]);
+ }
+}
+
+int ima_policy_show(struct seq_file *m, void *v)
+{
+ struct ima_rule_entry *entry = v;
int i;
+ char tbuf[64] = {0,};
+ int offset = 0;
- mutex_lock(&ima_rules_mutex);
- list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
- for (i = 0; i < MAX_LSM_RULES; i++)
- kfree(entry->lsm[i].args_p);
+ rcu_read_lock();
- list_del(&entry->list);
- kfree(entry);
+ /* Do not print rules with inactive LSM labels */
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].args_p && !entry->lsm[i].rule) {
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+
+ if (entry->action & MEASURE)
+ seq_puts(m, pt(Opt_measure));
+ if (entry->action & DONT_MEASURE)
+ seq_puts(m, pt(Opt_dont_measure));
+ if (entry->action & APPRAISE)
+ seq_puts(m, pt(Opt_appraise));
+ if (entry->action & DONT_APPRAISE)
+ seq_puts(m, pt(Opt_dont_appraise));
+ if (entry->action & AUDIT)
+ seq_puts(m, pt(Opt_audit));
+ if (entry->action & DONT_AUDIT)
+ seq_puts(m, pt(Opt_dont_audit));
+ if (entry->action & HASH)
+ seq_puts(m, pt(Opt_hash));
+ if (entry->action & DONT_HASH)
+ seq_puts(m, pt(Opt_dont_hash));
+
+ seq_puts(m, " ");
+
+ if (entry->flags & IMA_FUNC)
+ policy_func_show(m, entry->func);
+
+ if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) {
+ if (entry->flags & IMA_MASK)
+ offset = 1;
+ if (entry->mask & MAY_EXEC)
+ seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset);
+ if (entry->mask & MAY_WRITE)
+ seq_printf(m, pt(Opt_mask), mt(mask_write) + offset);
+ if (entry->mask & MAY_READ)
+ seq_printf(m, pt(Opt_mask), mt(mask_read) + offset);
+ if (entry->mask & MAY_APPEND)
+ seq_printf(m, pt(Opt_mask), mt(mask_append) + offset);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSMAGIC) {
+ snprintf(tbuf, sizeof(tbuf), "0x%lx", entry->fsmagic);
+ seq_printf(m, pt(Opt_fsmagic), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSNAME) {
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->fsname);
+ seq_printf(m, pt(Opt_fsname), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FS_SUBTYPE) {
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->fs_subtype);
+ seq_printf(m, pt(Opt_fs_subtype), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_KEYRINGS) {
+ seq_puts(m, "keyrings=");
+ ima_show_rule_opt_list(m, entry->keyrings);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_LABEL) {
+ seq_puts(m, "label=");
+ ima_show_rule_opt_list(m, entry->label);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_PCR) {
+ snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr);
+ seq_printf(m, pt(Opt_pcr), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSUUID) {
+ seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_UID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
+ if (entry->uid_op == &uid_gt)
+ seq_printf(m, pt(Opt_uid_gt), tbuf);
+ else if (entry->uid_op == &uid_lt)
+ seq_printf(m, pt(Opt_uid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_uid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_EUID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
+ if (entry->uid_op == &uid_gt)
+ seq_printf(m, pt(Opt_euid_gt), tbuf);
+ else if (entry->uid_op == &uid_lt)
+ seq_printf(m, pt(Opt_euid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_euid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_GID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kgid_val(entry->gid));
+ if (entry->gid_op == &gid_gt)
+ seq_printf(m, pt(Opt_gid_gt), tbuf);
+ else if (entry->gid_op == &gid_lt)
+ seq_printf(m, pt(Opt_gid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_gid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_EGID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kgid_val(entry->gid));
+ if (entry->gid_op == &gid_gt)
+ seq_printf(m, pt(Opt_egid_gt), tbuf);
+ else if (entry->gid_op == &gid_lt)
+ seq_printf(m, pt(Opt_egid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_egid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FOWNER) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->fowner));
+ if (entry->fowner_op == &vfsuid_gt_kuid)
+ seq_printf(m, pt(Opt_fowner_gt), tbuf);
+ else if (entry->fowner_op == &vfsuid_lt_kuid)
+ seq_printf(m, pt(Opt_fowner_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_fowner_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FGROUP) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kgid_val(entry->fgroup));
+ if (entry->fgroup_op == &vfsgid_gt_kgid)
+ seq_printf(m, pt(Opt_fgroup_gt), tbuf);
+ else if (entry->fgroup_op == &vfsgid_lt_kgid)
+ seq_printf(m, pt(Opt_fgroup_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_fgroup_eq), tbuf);
+ seq_puts(m, " ");
}
- mutex_unlock(&ima_rules_mutex);
+
+ if (entry->flags & IMA_VALIDATE_ALGOS) {
+ seq_puts(m, "appraise_algos=");
+ ima_policy_show_appraise_algos(m, entry->allowed_algos);
+ seq_puts(m, " ");
+ }
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].rule) {
+ switch (i) {
+ case LSM_OBJ_USER:
+ seq_printf(m, pt(Opt_obj_user),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_OBJ_ROLE:
+ seq_printf(m, pt(Opt_obj_role),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_OBJ_TYPE:
+ seq_printf(m, pt(Opt_obj_type),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_USER:
+ seq_printf(m, pt(Opt_subj_user),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_ROLE:
+ seq_printf(m, pt(Opt_subj_role),
+ entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_TYPE:
+ seq_printf(m, pt(Opt_subj_type),
+ entry->lsm[i].args_p);
+ break;
+ }
+ seq_puts(m, " ");
+ }
+ }
+ if (entry->template)
+ seq_printf(m, "template=%s ", entry->template->name);
+ if (entry->flags & IMA_DIGSIG_REQUIRED) {
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ seq_puts(m, "appraise_type=sigv3 ");
+ else if (entry->flags & IMA_MODSIG_ALLOWED)
+ seq_puts(m, "appraise_type=imasig|modsig ");
+ else
+ seq_puts(m, "appraise_type=imasig ");
+ }
+ if (entry->flags & IMA_VERITY_REQUIRED)
+ seq_puts(m, "digest_type=verity ");
+ if (entry->flags & IMA_PERMIT_DIRECTIO)
+ seq_puts(m, "permit_directio ");
+ rcu_read_unlock();
+ seq_puts(m, "\n");
+ return 0;
+}
+#endif /* CONFIG_IMA_READ_POLICY */
+
+#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
+/*
+ * ima_appraise_signature: whether IMA will appraise a given function using
+ * an IMA digital signature. This is restricted to cases where the kernel
+ * has a set of built-in trusted keys in order to avoid an attacker simply
+ * loading additional keys.
+ */
+bool ima_appraise_signature(enum kernel_read_file_id id)
+{
+ struct ima_rule_entry *entry;
+ bool found = false;
+ enum ima_hooks func;
+ struct list_head *ima_rules_tmp;
+
+ if (id >= READING_MAX_ID)
+ return false;
+
+ if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE)
+ && security_locked_down(LOCKDOWN_KEXEC))
+ return false;
+
+ func = read_idmap[id] ?: FILE_CHECK;
+
+ rcu_read_lock();
+ ima_rules_tmp = rcu_dereference(ima_rules);
+ list_for_each_entry_rcu(entry, ima_rules_tmp, list) {
+ if (entry->action != APPRAISE)
+ continue;
+
+ /*
+ * A generic entry will match, but otherwise require that it
+ * match the func we're looking for
+ */
+ if (entry->func && entry->func != func)
+ continue;
+
+ /*
+ * We require this to be a digital signature, not a raw IMA
+ * hash.
+ */
+ if (entry->flags & IMA_DIGSIG_REQUIRED)
+ found = true;
+
+ /*
+ * We've found a rule that matches, so break now even if it
+ * didn't require a digital signature - a later rule that does
+ * won't override it, so would be a false positive.
+ */
+ break;
+ }
+
+ rcu_read_unlock();
+ return found;
}
+#endif /* CONFIG_IMA_APPRAISE && CONFIG_INTEGRITY_TRUSTED_KEYRING */
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index ff63fe00c195..590637e81ad1 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
@@ -6,11 +7,6 @@
* Reiner Sailer <sailer@watson.ibm.com>
* Mimi Zohar <zohar@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
* File: ima_queue.c
* Implements queues that store template measurements and
* maintains aggregate over the stored measurements
@@ -18,14 +14,23 @@
* The measurement list is append-only. No entry is
* ever removed or changed during the boot-cycle.
*/
-#include <linux/module.h>
+
#include <linux/rculist.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include "ima.h"
#define AUDIT_CAUSE_LEN_MAX 32
+/* pre-allocated array of tpm_digest structures to extend a PCR */
+static struct tpm_digest *digests;
+
LIST_HEAD(ima_measurements); /* list of all measurements */
+#ifdef CONFIG_IMA_KEXEC
+static unsigned long binary_runtime_size;
+#else
+static unsigned long binary_runtime_size = ULONG_MAX;
+#endif
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
@@ -40,8 +45,15 @@ struct ima_h_table ima_htable = {
*/
static DEFINE_MUTEX(ima_extend_list_mutex);
+/*
+ * Used internally by the kernel to suspend measurements.
+ * Protected by ima_extend_list_mutex.
+ */
+static bool ima_measurements_suspended;
+
/* lookup up the digest value in the hash table, and return the entry */
-static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
+static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
+ int pcr)
{
struct ima_queue_entry *qe, *ret = NULL;
unsigned int key;
@@ -50,8 +62,9 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
key = ima_hash_key(digest_value);
rcu_read_lock();
hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
- rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
- if (rc == 0) {
+ rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
+ digest_value, hash_digest_size[ima_hash_algo]);
+ if ((rc == 0) && (qe->entry->pcr == pcr)) {
ret = qe;
break;
}
@@ -60,19 +73,39 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
return ret;
}
+/*
+ * Calculate the memory required for serializing a single
+ * binary_runtime_measurement list entry, which contains a
+ * couple of variable length fields (e.g template name and data).
+ */
+static int get_binary_runtime_size(struct ima_template_entry *entry)
+{
+ int size = 0;
+
+ size += sizeof(u32); /* pcr */
+ size += TPM_DIGEST_SIZE;
+ size += sizeof(int); /* template name size field */
+ size += strlen(entry->template_desc->name);
+ size += sizeof(entry->template_data_len);
+ size += entry->template_data_len;
+ return size;
+}
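As a worked example: assuming 32-bit pcr and length fields (sizeof(u32) == sizeof(int) == 4, and template_data_len being a 32-bit field) and the 20-byte TPM_DIGEST_SIZE, a single 'ima-ng' entry (6-character template name) carrying 56 bytes of template data serializes to 4 + 20 + 4 + 6 + 4 + 56 = 94 bytes.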
+
/* ima_add_template_entry helper function:
- * - Add template entry to measurement list and hash table.
+ * - Add template entry to the measurement list and hash table, for
+ * all entries except those carried across kexec.
*
* (Called with ima_extend_list_mutex held.)
*/
-static int ima_add_digest_entry(struct ima_template_entry *entry)
+static int ima_add_digest_entry(struct ima_template_entry *entry,
+ bool update_htable)
{
struct ima_queue_entry *qe;
unsigned int key;
qe = kmalloc(sizeof(*qe), GFP_KERNEL);
if (qe == NULL) {
- pr_err("IMA: OUT OF MEMORY ERROR creating queue entry.\n");
+ pr_err("OUT OF MEMORY ERROR creating queue entry\n");
return -ENOMEM;
}
qe->entry = entry;
@@ -81,48 +114,89 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
list_add_tail_rcu(&qe->later, &ima_measurements);
atomic_long_inc(&ima_htable.len);
- key = ima_hash_key(entry->digest);
- hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ if (update_htable) {
+ key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ }
+
+ if (binary_runtime_size != ULONG_MAX) {
+ int size;
+
+ size = get_binary_runtime_size(entry);
+ binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
+ binary_runtime_size + size : ULONG_MAX;
+ }
return 0;
}
-static int ima_pcr_extend(const u8 *hash)
+/*
+ * Return the amount of memory required for serializing the
+ * entire binary_runtime_measurement list, including the ima_kexec_hdr
+ * structure.
+ */
+unsigned long ima_get_binary_runtime_size(void)
+{
+ if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+ return ULONG_MAX;
+ else
+ return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+}
+
+static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
int result = 0;
- if (!ima_used_chip)
+ if (!ima_tpm_chip)
return result;
- result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
+ result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
if (result != 0)
- pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
- result);
+ pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
}
-/* Add template entry to the measurement list and hash table,
- * and extend the pcr.
+/*
+ * Add template entry to the measurement list and hash table, and
+ * extend the pcr.
+ *
+ * On systems which support carrying the IMA measurement list across
+ * kexec, maintain the total memory size required for serializing the
+ * binary_runtime_measurements.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode)
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
{
- u8 digest[IMA_DIGEST_SIZE];
+ u8 *digest = entry->digests[ima_hash_algo_idx].digest;
+ struct tpm_digest *digests_arg = entry->digests;
const char *audit_cause = "hash_added";
char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
int audit_info = 1;
int result = 0, tpmresult = 0;
mutex_lock(&ima_extend_list_mutex);
- if (!violation) {
- memcpy(digest, entry->digest, sizeof digest);
- if (ima_lookup_digest_entry(digest)) {
+
+ /*
+ * Avoid appending to the measurement log when the TPM subsystem has
+ * been shut down while preparing for system reboot.
+ */
+ if (ima_measurements_suspended) {
+ audit_cause = "measurements_suspended";
+ audit_info = 0;
+ result = -ENODEV;
+ goto out;
+ }
+
+ if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
+ if (ima_lookup_digest_entry(digest, entry->pcr)) {
audit_cause = "hash_exists";
result = -EEXIST;
goto out;
}
}
- result = ima_add_digest_entry(entry);
+ result = ima_add_digest_entry(entry,
+ !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE));
if (result < 0) {
audit_cause = "ENOMEM";
audit_info = 0;
@@ -130,9 +204,9 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
if (violation) /* invalidate pcr */
- memset(digest, 0xff, sizeof digest);
+ digests_arg = digests;
- tpmresult = ima_pcr_extend(digest);
+ tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
if (tpmresult != 0) {
snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
tpmresult);
@@ -141,8 +215,76 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template.file_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, audit_info);
return result;
}
+
+int ima_restore_measurement_entry(struct ima_template_entry *entry)
+{
+ int result = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ result = ima_add_digest_entry(entry, 0);
+ mutex_unlock(&ima_extend_list_mutex);
+ return result;
+}
+
+static void ima_measurements_suspend(void)
+{
+ mutex_lock(&ima_extend_list_mutex);
+ ima_measurements_suspended = true;
+ mutex_unlock(&ima_extend_list_mutex);
+}
+
+static int ima_reboot_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+#ifdef CONFIG_IMA_KEXEC
+ if (action == SYS_RESTART && data && !strcmp(data, "kexec reboot"))
+ ima_measure_kexec_event("kexec_execute");
+#endif
+
+ ima_measurements_suspend();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ima_reboot_nb = {
+ .notifier_call = ima_reboot_notifier,
+};
+
+void __init ima_init_reboot_notifier(void)
+{
+ register_reboot_notifier(&ima_reboot_nb);
+}
+
+int __init ima_init_digests(void)
+{
+ u16 digest_size;
+ u16 crypto_id;
+ int i;
+
+ if (!ima_tpm_chip)
+ return 0;
+
+ digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
+ GFP_NOFS);
+ if (!digests)
+ return -ENOMEM;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+
+ /* for unmapped TPM algorithms, the digest is still a padded SHA1 */
+ if (crypto_id == HASH_ALGO__LAST)
+ digest_size = SHA1_DIGEST_SIZE;
+
+ memset(digests[i].digest, 0xff, digest_size);
+ }
+
+ return 0;
+}
diff --git a/security/integrity/ima/ima_queue_keys.c b/security/integrity/ima/ima_queue_keys.c
new file mode 100644
index 000000000000..4f0aea155bf9
--- /dev/null
+++ b/security/integrity/ima/ima_queue_keys.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microsoft Corporation
+ *
+ * Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
+ *
+ * File: ima_queue_keys.c
+ * Enables deferred processing of keys
+ */
+
+#include <linux/user_namespace.h>
+#include <linux/workqueue.h>
+#include <keys/asymmetric-type.h>
+#include "ima.h"
+
+/*
+ * Flag to indicate whether a key can be processed
+ * right away or should be queued for processing later.
+ */
+static bool ima_process_keys;
+
+/*
+ * To synchronize access to the list of keys that need to be measured
+ */
+static DEFINE_MUTEX(ima_keys_lock);
+static LIST_HEAD(ima_keys);
+
+/*
+ * If a custom IMA policy is not loaded, the keys queued up
+ * for measurement should be freed. This worker is used
+ * to handle that scenario.
+ */
+static long ima_key_queue_timeout = 300000; /* 5 Minutes */
+static void ima_keys_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ima_keys_delayed_work, ima_keys_handler);
+static bool timer_expired;
+
+/*
+ * This worker function frees keys that may still be
+ * queued up in case custom IMA policy was not loaded.
+ */
+static void ima_keys_handler(struct work_struct *work)
+{
+ timer_expired = true;
+ ima_process_queued_keys();
+}
+
+/*
+ * This function sets up a worker to free queued keys in case
+ * custom IMA policy was never loaded.
+ */
+void ima_init_key_queue(void)
+{
+ schedule_delayed_work(&ima_keys_delayed_work,
+ msecs_to_jiffies(ima_key_queue_timeout));
+}
+
+static void ima_free_key_entry(struct ima_key_entry *entry)
+{
+ if (entry) {
+ kfree(entry->payload);
+ kfree(entry->keyring_name);
+ kfree(entry);
+ }
+}
+
+static struct ima_key_entry *ima_alloc_key_entry(struct key *keyring,
+ const void *payload,
+ size_t payload_len)
+{
+ int rc = 0;
+ const char *audit_cause = "ENOMEM";
+ struct ima_key_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry) {
+ entry->payload = kmemdup(payload, payload_len, GFP_KERNEL);
+ entry->keyring_name = kstrdup(keyring->description,
+ GFP_KERNEL);
+ entry->payload_len = payload_len;
+ }
+
+ if ((entry == NULL) || (entry->payload == NULL) ||
+ (entry->keyring_name == NULL)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&entry->list);
+
+out:
+ if (rc) {
+ integrity_audit_message(AUDIT_INTEGRITY_PCR, NULL,
+ keyring->description,
+ func_measure_str(KEY_CHECK),
+ audit_cause, rc, 0, rc);
+ ima_free_key_entry(entry);
+ entry = NULL;
+ }
+
+ return entry;
+}
+
+bool ima_queue_key(struct key *keyring, const void *payload,
+ size_t payload_len)
+{
+ bool queued = false;
+ struct ima_key_entry *entry;
+
+ entry = ima_alloc_key_entry(keyring, payload, payload_len);
+ if (!entry)
+ return false;
+
+ mutex_lock(&ima_keys_lock);
+ if (!ima_process_keys) {
+ list_add_tail(&entry->list, &ima_keys);
+ queued = true;
+ }
+ mutex_unlock(&ima_keys_lock);
+
+ if (!queued)
+ ima_free_key_entry(entry);
+
+ return queued;
+}
+
+/*
+ * ima_process_queued_keys() - process keys queued for measurement
+ *
+ * This function sets ima_process_keys to true and processes queued keys.
+ * From here on keys will be processed right away (not queued).
+ */
+void ima_process_queued_keys(void)
+{
+ struct ima_key_entry *entry, *tmp;
+ bool process = false;
+
+ if (ima_process_keys)
+ return;
+
+ /*
+ * Since ima_process_keys is set to true, any new key will be
+ * processed immediately and not be queued to ima_keys list.
+ * The first caller to set the ima_process_keys flag to true
+ * will process the queued keys.
+ */
+ mutex_lock(&ima_keys_lock);
+ if (!ima_process_keys) {
+ ima_process_keys = true;
+ process = true;
+ }
+ mutex_unlock(&ima_keys_lock);
+
+ if (!process)
+ return;
+
+ if (!timer_expired)
+ cancel_delayed_work_sync(&ima_keys_delayed_work);
+
+ list_for_each_entry_safe(entry, tmp, &ima_keys, list) {
+ if (!timer_expired)
+ process_buffer_measurement(&nop_mnt_idmap, NULL,
+ entry->payload,
+ entry->payload_len,
+ entry->keyring_name,
+ KEY_CHECK, 0,
+ entry->keyring_name,
+ false, NULL, 0);
+ list_del(&entry->list);
+ ima_free_key_entry(entry);
+ }
+}
+
+inline bool ima_should_queue_key(void)
+{
+ return !ima_process_keys;
+}
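A sketch of the expected caller pattern (modeled on the asymmetric-key hook elsewhere in this series; the function and variable names here are illustrative): keys are queued while no custom policy has been loaded, and are measured directly once ima_process_keys has flipped to true.

/* Sketch only: how a key-measurement hook is expected to use this queue. */
static void example_measure_key(struct key *keyring, const void *payload,
				size_t payload_len)
{
	bool queued = false;

	/* queue the key if a custom IMA policy has not been loaded yet */
	if (ima_should_queue_key())
		queued = ima_queue_key(keyring, payload, payload_len);

	if (queued)
		return;

	/* otherwise measure the key right away */
	process_buffer_measurement(&nop_mnt_idmap, NULL, payload, payload_len,
				   keyring->description, KEY_CHECK, 0,
				   keyring->description, false, NULL, 0);
}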
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 000000000000..04c49f05cb74
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+
+#include <linux/rculist.h>
+#include "ima.h"
+#include "ima_template_lib.h"
+
+enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME,
+ HDR_TEMPLATE_DATA, HDR__LAST };
+
+static struct ima_template_desc builtin_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng", .fmt = "d-ng|n-ng"},
+ {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+ {.name = "ima-ngv2", .fmt = "d-ngv2|n-ng"},
+ {.name = "ima-sigv2", .fmt = "d-ngv2|n-ng|sig"},
+ {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"},
+ {.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"},
+ {.name = "evm-sig",
+ .fmt = "d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode"},
+ {.name = "", .fmt = ""}, /* placeholder for a custom format */
+};
+
+static LIST_HEAD(defined_templates);
+static DEFINE_SPINLOCK(template_list);
+static int template_setup_done;
+
+static const struct ima_template_field supported_fields[] = {
+ {.field_id = "d", .field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n", .field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "d-ngv2", .field_init = ima_eventdigest_ngv2_init,
+ .field_show = ima_show_template_digest_ngv2},
+ {.field_id = "n-ng", .field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig", .field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "buf", .field_init = ima_eventbuf_init,
+ .field_show = ima_show_template_buf},
+ {.field_id = "d-modsig", .field_init = ima_eventdigest_modsig_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "modsig", .field_init = ima_eventmodsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "evmsig", .field_init = ima_eventevmsig_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "iuid", .field_init = ima_eventinodeuid_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "igid", .field_init = ima_eventinodegid_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "imode", .field_init = ima_eventinodemode_init,
+ .field_show = ima_show_template_uint},
+ {.field_id = "xattrnames",
+ .field_init = ima_eventinodexattrnames_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "xattrlengths",
+ .field_init = ima_eventinodexattrlengths_init,
+ .field_show = ima_show_template_sig},
+ {.field_id = "xattrvalues",
+ .field_init = ima_eventinodexattrvalues_init,
+ .field_show = ima_show_template_sig},
+};
+
+/*
+ * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't
+ * need to be accounted for since they shouldn't be defined in the same template
+ * description as 'd-ng' and 'n-ng' respectively.
+ */
+#define MAX_TEMPLATE_NAME_LEN \
+ sizeof("d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode")
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *ima_buf_template;
+
+/**
+ * ima_template_has_modsig - Check whether template has modsig-related fields.
+ * @ima_template: IMA template to check.
+ *
+ * Tells whether the given template has fields referencing a file's appended
+ * signature.
+ */
+bool ima_template_has_modsig(const struct ima_template_desc *ima_template)
+{
+ int i;
+
+ for (i = 0; i < ima_template->num_fields; i++)
+ if (!strcmp(ima_template->fields[i]->field_id, "modsig") ||
+ !strcmp(ima_template->fields[i]->field_id, "d-modsig"))
+ return true;
+
+ return false;
+}
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ if (template_setup_done)
+ return 1;
+
+ if (!ima_template)
+ ima_init_template_list();
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc) {
+ pr_err("template %s not found, using %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ template_setup_done = 1;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static int __init ima_template_fmt_setup(char *str)
+{
+ int num_templates = ARRAY_SIZE(builtin_templates);
+
+ if (template_setup_done)
+ return 1;
+
+ if (template_desc_init_fields(str, NULL, NULL) < 0) {
+ pr_err("format string '%s' not valid, using template %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ builtin_templates[num_templates - 1].fmt = str;
+ ima_template = builtin_templates + num_templates - 1;
+ template_setup_done = 1;
+
+ return 1;
+}
+__setup("ima_template_fmt=", ima_template_fmt_setup);
+
+struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ struct ima_template_desc *template_desc;
+ int found = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(template_desc, &defined_templates, list) {
+ if ((strcmp(template_desc->name, name) == 0) ||
+ (strcmp(template_desc->fmt, name) == 0)) {
+ found = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? template_desc : NULL;
+}
+
+static const struct ima_template_field *
+lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
+static int template_fmt_size(const char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
+
+int template_desc_init_fields(const char *template_fmt,
+ const struct ima_template_field ***fields,
+ int *num_fields)
+{
+ const char *template_fmt_ptr;
+ const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
+ int template_num_fields;
+ int i, len;
+
+ if (num_fields && *num_fields > 0) /* already initialized? */
+ return 0;
+
+ template_num_fields = template_fmt_size(template_fmt);
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
+ pr_err("format string '%s' contains too many fields\n",
+ template_fmt);
+ return -EINVAL;
+ }
+
+ for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields;
+ i++, template_fmt_ptr += len + 1) {
+ char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1];
+
+ len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr;
+ if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) {
+ pr_err("Invalid field with length %d\n", len);
+ return -EINVAL;
+ }
+
+ memcpy(tmp_field_id, template_fmt_ptr, len);
+ tmp_field_id[len] = '\0';
+ found_fields[i] = lookup_template_field(tmp_field_id);
+ if (!found_fields[i]) {
+ pr_err("field '%s' not found\n", tmp_field_id);
+ return -ENOENT;
+ }
+ }
+
+ if (fields && num_fields) {
+ *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL);
+ if (*fields == NULL)
+ return -ENOMEM;
+
+ memcpy(*fields, found_fields, i * sizeof(**fields));
+ *num_fields = i;
+ }
+
+ return 0;
+}
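A minimal usage sketch (kernel context; the wrapper function is illustrative): resolving the built-in 'ima-sig' format "d-ng|n-ng|sig" yields three field descriptors.

/* Sketch only: resolving a template format string into field descriptors. */
static int example_init_ima_sig_fields(void)
{
	const struct ima_template_field **fields = NULL;
	int num_fields = 0;
	int ret;

	ret = template_desc_init_fields("d-ng|n-ng|sig", &fields, &num_fields);
	/* on success: ret == 0, num_fields == 3, fields map to d-ng, n-ng, sig */
	return ret;
}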
+
+void ima_init_template_list(void)
+{
+ int i;
+
+ if (!list_empty(&defined_templates))
+ return;
+
+ spin_lock(&template_list);
+ for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
+ list_add_tail_rcu(&builtin_templates[i].list,
+ &defined_templates);
+ }
+ spin_unlock(&template_list);
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template) {
+ ima_init_template_list();
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ }
+ return ima_template;
+}
+
+struct ima_template_desc *ima_template_desc_buf(void)
+{
+ if (!ima_buf_template) {
+ ima_init_template_list();
+ ima_buf_template = lookup_template_desc("ima-buf");
+ }
+ return ima_buf_template;
+}
+
+int __init ima_init_template(void)
+{
+ struct ima_template_desc *template = ima_template_desc_current();
+ int result;
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0) {
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), result);
+ return result;
+ }
+
+ template = ima_template_desc_buf();
+ if (!template) {
+ pr_err("Failed to get ima-buf template\n");
+ return -EINVAL;
+ }
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), result);
+
+ return result;
+}
+
+static struct ima_template_desc *restore_template_fmt(char *template_name)
+{
+ struct ima_template_desc *template_desc = NULL;
+ int ret;
+
+ ret = template_desc_init_fields(template_name, NULL, NULL);
+ if (ret < 0) {
+ pr_err("attempting to initialize the template \"%s\" failed\n",
+ template_name);
+ goto out;
+ }
+
+ template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
+ if (!template_desc)
+ goto out;
+
+ template_desc->name = "";
+ template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+ if (!template_desc->fmt) {
+ kfree(template_desc);
+ template_desc = NULL;
+ goto out;
+ }
+
+ spin_lock(&template_list);
+ list_add_tail_rcu(&template_desc->list, &defined_templates);
+ spin_unlock(&template_list);
+out:
+ return template_desc;
+}
+
+static int ima_restore_template_data(struct ima_template_desc *template_desc,
+ void *template_data,
+ int template_data_size,
+ struct ima_template_entry **entry)
+{
+ struct tpm_digest *digests;
+ int ret = 0;
+ int i;
+
+ *entry = kzalloc(struct_size(*entry, template_data,
+ template_desc->num_fields), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*digests), GFP_NOFS);
+ if (!digests) {
+ kfree(*entry);
+ return -ENOMEM;
+ }
+
+ (*entry)->digests = digests;
+
+ ret = ima_parse_buf(template_data, template_data + template_data_size,
+ NULL, template_desc->num_fields,
+ (*entry)->template_data, NULL, NULL,
+ ENFORCE_FIELDS | ENFORCE_BUFEND, "template data");
+ if (ret < 0) {
+ kfree((*entry)->digests);
+ kfree(*entry);
+ return ret;
+ }
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_field_data *field_data = &(*entry)->template_data[i];
+ u8 *data = field_data->data;
+
+ (*entry)->template_data[i].data =
+ kzalloc(field_data->len + 1, GFP_KERNEL);
+ if (!(*entry)->template_data[i].data) {
+ ret = -ENOMEM;
+ break;
+ }
+ memcpy((*entry)->template_data[i].data, data, field_data->len);
+ (*entry)->template_data_len += sizeof(field_data->len);
+ (*entry)->template_data_len += field_data->len;
+ }
+
+ if (ret < 0) {
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ }
+
+ return ret;
+}
+
+/* Restore the serialized binary measurement list without extending PCRs. */
+int ima_restore_measurement_list(loff_t size, void *buf)
+{
+ char template_name[MAX_TEMPLATE_NAME_LEN];
+ unsigned char zero[TPM_DIGEST_SIZE] = { 0 };
+
+ struct ima_kexec_hdr *khdr = buf;
+ struct ima_field_data hdr[HDR__LAST] = {
+ [HDR_PCR] = {.len = sizeof(u32)},
+ [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE},
+ };
+
+ void *bufp = buf + sizeof(*khdr);
+ void *bufendp;
+ struct ima_template_entry *entry;
+ struct ima_template_desc *template_desc;
+ DECLARE_BITMAP(hdr_mask, HDR__LAST);
+ unsigned long count = 0;
+ int ret = 0;
+
+ if (!buf || size < sizeof(*khdr))
+ return 0;
+
+ if (ima_canonical_fmt) {
+ khdr->version = le16_to_cpu((__force __le16)khdr->version);
+ khdr->count = le64_to_cpu((__force __le64)khdr->count);
+ khdr->buffer_size = le64_to_cpu((__force __le64)khdr->buffer_size);
+ }
+
+ if (khdr->version != 1) {
+ pr_err("attempting to restore a incompatible measurement list");
+ return -EINVAL;
+ }
+
+ if (khdr->count > ULONG_MAX - 1) {
+ pr_err("attempting to restore too many measurements");
+ return -EINVAL;
+ }
+
+ bitmap_zero(hdr_mask, HDR__LAST);
+ bitmap_set(hdr_mask, HDR_PCR, 1);
+ bitmap_set(hdr_mask, HDR_DIGEST, 1);
+
+ /*
+ * ima kexec buffer prefix: version, buffer size, count
+ * v1 format: pcr, digest, template-name-len, template-name,
+ * template-data-size, template-data
+ */
+ bufendp = buf + khdr->buffer_size;
+ while ((bufp < bufendp) && (count++ < khdr->count)) {
+ int enforce_mask = ENFORCE_FIELDS;
+
+ enforce_mask |= (count == khdr->count) ? ENFORCE_BUFEND : 0;
+ ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL,
+ hdr_mask, enforce_mask, "entry header");
+ if (ret < 0)
+ break;
+
+ if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
+ pr_err("attempting to restore a template name that is too long\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* template name is not null terminated */
+ memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data,
+ hdr[HDR_TEMPLATE_NAME].len);
+ template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
+
+ if (strcmp(template_name, "ima") == 0) {
+ pr_err("attempting to restore an unsupported template \"%s\" failed\n",
+ template_name);
+ ret = -EINVAL;
+ break;
+ }
+
+ template_desc = lookup_template_desc(template_name);
+ if (!template_desc) {
+ template_desc = restore_template_fmt(template_name);
+ if (!template_desc)
+ break;
+ }
+
+ /*
+ * Only the running system's template format is initialized
+ * on boot. As needed, initialize the other template formats.
+ */
+ ret = template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ if (ret < 0) {
+ pr_err("attempting to restore the template fmt \"%s\" failed\n",
+ template_desc->fmt);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = ima_restore_template_data(template_desc,
+ hdr[HDR_TEMPLATE_DATA].data,
+ hdr[HDR_TEMPLATE_DATA].len,
+ &entry);
+ if (ret < 0)
+ break;
+
+ if (memcmp(hdr[HDR_DIGEST].data, zero, sizeof(zero))) {
+ ret = ima_calc_field_array_hash(
+ &entry->template_data[0],
+ entry);
+ if (ret < 0) {
+ pr_err("cannot calculate template digest\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
+ le32_to_cpu(*(__le32 *)(hdr[HDR_PCR].data));
+ ret = ima_restore_measurement_entry(entry);
+ if (ret < 0)
+ break;
+
+ }
+ return ret;
+}
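
For reference (not part of the patch), the v1 layout consumed above can be walked with a short userspace sketch. The per-entry layout (a 4-byte PCR, a TPM_DIGEST_SIZE (20-byte SHA1) template digest, then u32-length-prefixed template name and template data) follows the parsing code; the header field widths and the hdr_size parameter are assumptions left to the caller, and the sketch ignores the canonical little-endian conversion that ima_canonical_fmt would otherwise require.

/*
 * Illustrative sketch only (not part of the patch): walk a v1 IMA kexec
 * measurement-list buffer in userspace. hdr_size and count come from the
 * buffer header, whose exact struct layout (defined in ima.h) is assumed
 * to be known to the caller.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SHA1_DIGEST_LEN 20

static int walk_ima_kexec_buf(const uint8_t *buf, size_t size,
			      size_t hdr_size, uint64_t count)
{
	const uint8_t *p = buf + hdr_size, *end = buf + size;
	uint64_t i;

	for (i = 0; i < count && p < end; i++) {
		uint32_t pcr, name_len, data_len;
		char name[64] = { 0 };

		if ((size_t)(end - p) < 4 + SHA1_DIGEST_LEN + 4)
			return -1;
		memcpy(&pcr, p, 4);		p += 4;
		p += SHA1_DIGEST_LEN;		/* template digest (skipped) */
		memcpy(&name_len, p, 4);	p += 4;

		if (name_len >= sizeof(name) ||
		    (size_t)(end - p) < (size_t)name_len + 4)
			return -1;
		memcpy(name, p, name_len);	p += name_len;
		memcpy(&data_len, p, 4);	p += 4;

		if ((size_t)(end - p) < data_len)
			return -1;
		p += data_len;			/* template data (not decoded) */

		printf("entry %llu: pcr=%u template=%s data_len=%u\n",
		       (unsigned long long)i, pcr, name, data_len);
	}
	return 0;
}
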
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 000000000000..0e627eac9c33
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+
+#include "ima_template_lib.h"
+#include <linux/xattr.h>
+#include <linux/evm.h>
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX,
+ DATA_FMT_UINT
+};
+
+enum digest_type {
+ DIGEST_TYPE_IMA,
+ DIGEST_TYPE_VERITY,
+ DIGEST_TYPE__LAST
+};
+
+#define DIGEST_TYPE_NAME_LEN_MAX 7 /* including NUL */
+static const char * const digest_type_name[DIGEST_TYPE__LAST] = {
+ [DIGEST_TYPE_IMA] = "ima",
+ [DIGEST_TYPE_VERITY] = "verity"
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen = datalen;
+
+ if (datafmt == DATA_FMT_STRING)
+ buflen = datalen + 1;
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscores in event names and
+ * strings. This prevents filenames that contain spaces, or that end
+ * with the ' (deleted)' suffix, from being split into multiple
+ * template fields when an ASCII measurement list is parsed (the
+ * space is the delimiter character for ASCII measurement lists).
+ */
+ if (datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
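
The space-to-underscore substitution is what keeps the ASCII measurement list parseable column by column: a pathname such as "/tmp/foo bar (deleted)" is recorded as "/tmp/foo_bar_(deleted)". A minimal standalone sketch of the same sanitization (hypothetical helper, not part of the patch):

static void example_sanitize_event_name(char *name)
{
	char *p;

	/* mirror ima_write_template_field_data(): spaces become underscores */
	for (p = name; *p; p++)
		if (*p == ' ')
			*p = '_';
}
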
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data;
+ u32 buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO:
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strrchr(field_data->data, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ fallthrough;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ case DATA_FMT_UINT:
+ switch (field_data->len) {
+ case sizeof(u8):
+ seq_printf(m, "%u", *(u8 *)buf_ptr);
+ break;
+ case sizeof(u16):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%u",
+ le16_to_cpu(*(__le16 *)buf_ptr));
+ else
+ seq_printf(m, "%u", *(u16 *)buf_ptr);
+ break;
+ case sizeof(u32):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%u",
+ le32_to_cpu(*(__le32 *)buf_ptr));
+ else
+ seq_printf(m, "%u", *(u32 *)buf_ptr);
+ break;
+ case sizeof(u64):
+ if (ima_canonical_fmt)
+ seq_printf(m, "%llu",
+ le64_to_cpu(*(__le64 *)buf_ptr));
+ else
+ seq_printf(m, "%llu", *(u64 *)buf_ptr);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
+ strlen(field_data->data) : field_data->len;
+
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
+ u32 field_len = !ima_canonical_fmt ?
+ len : (__force u32)cpu_to_le32(len);
+
+ ima_putc(m, &field_len, sizeof(field_len));
+ }
+
+ if (!len)
+ return;
+
+ ima_putc(m, field_data->data, len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ case IMA_SHOW_BINARY_NO_FIELD_LEN:
+ case IMA_SHOW_BINARY_OLD_STRING_FMT:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_digest_ngv2(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show,
+ DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+void ima_show_template_uint(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_UINT, field_data);
+}
+
+/**
+ * ima_parse_buf() - Parses lengths and data from an input buffer
+ * @bufstartp: Buffer start address.
+ * @bufendp: Buffer end address.
+ * @bufcurp: Pointer to remaining (non-parsed) data.
+ * @maxfields: Length of fields array.
+ * @fields: Array containing lengths and pointers of parsed data.
+ * @curfields: Number of array items containing parsed data.
+ * @len_mask: Bitmap (if bit is set, data length should not be parsed).
+ * @enforce_mask: Check if curfields == maxfields and/or bufcurp == bufendp.
+ * @bufname: String identifier of the input buffer.
+ *
+ * Return: 0 on success, -EINVAL on error.
+ */
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+ int maxfields, struct ima_field_data *fields, int *curfields,
+ unsigned long *len_mask, int enforce_mask, char *bufname)
+{
+ void *bufp = bufstartp;
+ int i;
+
+ for (i = 0; i < maxfields; i++) {
+ if (len_mask == NULL || !test_bit(i, len_mask)) {
+ if (bufp > (bufendp - sizeof(u32)))
+ break;
+
+ if (ima_canonical_fmt)
+ fields[i].len = le32_to_cpu(*(__le32 *)bufp);
+ else
+ fields[i].len = *(u32 *)bufp;
+
+ bufp += sizeof(u32);
+ }
+
+ if (bufp > (bufendp - fields[i].len))
+ break;
+
+ fields[i].data = bufp;
+ bufp += fields[i].len;
+ }
+
+ if ((enforce_mask & ENFORCE_FIELDS) && i != maxfields) {
+ pr_err("%s: nr of fields mismatch: expected: %d, current: %d\n",
+ bufname, maxfields, i);
+ return -EINVAL;
+ }
+
+ if ((enforce_mask & ENFORCE_BUFEND) && bufp != bufendp) {
+ pr_err("%s: buf end mismatch: expected: %p, current: %p\n",
+ bufname, bufendp, bufp);
+ return -EINVAL;
+ }
+
+ if (curfields)
+ *curfields = i;
+
+ if (bufcurp)
+ *bufcurp = bufp;
+
+ return 0;
+}
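
As a usage note (not part of the patch), a caller that expects exactly two u32-length-prefixed fields and wants the whole blob consumed would invoke the helper roughly as below; the function name and the "example blob" identifier are made up for the example.

static int example_parse_two_fields(void *blob, int blob_len)
{
	struct ima_field_data fields[2] = {};
	int nr_parsed;
	int ret;

	/* each field is preceded by a u32 length; reject trailing bytes */
	ret = ima_parse_buf(blob, blob + blob_len, NULL, 2, fields,
			    &nr_parsed, NULL,
			    ENFORCE_FIELDS | ENFORCE_BUFEND, "example blob");
	if (ret < 0)
		return ret;

	pr_info("field0: %u bytes, field1: %u bytes (%d fields parsed)\n",
		fields[0].len, fields[1].len, nr_parsed);
	return 0;
}
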
+
+static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,
+ u8 digest_type, u8 hash_algo,
+ struct ima_field_data *field_data)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: <hash algo> + ':' + '\0' + digest,
+ * - DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO:
+ * <digest type> + ':' + <hash algo> + ':' + '\0' + digest,
+ *
+ * where 'DATA_FMT_DIGEST' is the original digest format ('d')
+ * with a hash size limitation of 20 bytes,
+ * where <digest type> is either "ima" or "verity",
+ * where <hash algo> is the hash_algo_name[] string.
+ */
+ u8 buffer[DIGEST_TYPE_NAME_LEN_MAX + CRYPTO_MAX_ALG_NAME + 2 +
+ IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (digest_type < DIGEST_TYPE__LAST && hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_TYPE_AND_ALGO;
+ offset += 1 + sprintf(buffer, "%s:%s:",
+ digest_type_name[digest_type],
+ hash_algo_name[hash_algo]);
+ } else if (hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ offset += 1 + sprintf(buffer, "%s:",
+ hash_algo_name[hash_algo]);
+ }
+
+ if (digest) {
+ memcpy(buffer + offset, digest, digestsize);
+ } else {
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by the
+ * hash algorithm's digest size. If the hash algorithm is not
+ * specified, increase the offset by IMA_DIGEST_SIZE, which
+ * fits a SHA1 or MD5 digest.
+ */
+ if (hash_algo < HASH_ALGO__LAST)
+ offset += hash_digest_size[hash_algo];
+ else
+ offset += IMA_DIGEST_SIZE;
+ }
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
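
For anyone decoding these field values outside the kernel, the sketch below (illustrative only, not part of the patch; the helper name is hypothetical) splits a d-ng/d-ngv2 field buffer back into its textual prefix and the raw binary digest, relying on the NUL byte that follows the "<algo>:" or "<type>:<algo>:" prefix described above.

static int example_split_digest_field(const u8 *data, u32 len,
				      const char **prefix, const u8 **digest,
				      u32 *digest_len)
{
	/* prefix string ("sha256:" or "verity:sha256:") ends at the NUL */
	const u8 *sep = memchr(data, '\0', len);

	if (!sep)
		return -EINVAL;

	*prefix = (const char *)data;
	*digest = sep + 1;			/* raw binary digest bytes */
	*digest_len = len - (u32)(sep + 1 - data);
	return 0;
}
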
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct ima_max_digest_data hash;
+ struct ima_digest_data *hash_hdr = container_of(&hash.hdr,
+ struct ima_digest_data, hdr);
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(event_data->iint->ima_hash->algo)) {
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+ goto out;
+ }
+
+ if ((const char *)event_data->filename == boot_aggregate_name) {
+ if (ima_tpm_chip) {
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_boot_aggregate(hash_hdr);
+
+ /* algo can change depending on available PCR banks */
+ if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
+ result = -EINVAL;
+
+ if (result < 0)
+ memset(&hash, 0, sizeof(hash));
+ }
+
+ cur_digest = hash_hdr->digest;
+ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
+ goto out;
+ }
+
+ if (!event_data->file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(event_data->file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(event_data->file, hash_hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ event_data->filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash_hdr->digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ DIGEST_TYPE__LAST, HASH_ALGO__LAST,
+ field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = ima_hash_algo;
+ u32 cur_digestsize = 0;
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+
+ hash_algo = event_data->iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ DIGEST_TYPE__LAST, hash_algo,
+ field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit),
+ * prefixed with both the digest type and hash algorithm.
+ */
+int ima_eventdigest_ngv2_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = ima_hash_algo;
+ u32 cur_digestsize = 0;
+ u8 digest_type = DIGEST_TYPE_IMA;
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+
+ hash_algo = event_data->iint->ima_hash->algo;
+ if (event_data->iint->flags & IMA_VERITY_REQUIRED)
+ digest_type = DIGEST_TYPE_VERITY;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ digest_type, hash_algo,
+ field_data);
+}
+
+/*
+ * This function writes the digest of the file which is expected to match the
+ * digest contained in the file's appended signature.
+ */
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ enum hash_algo hash_algo;
+ const u8 *cur_digest;
+ u32 cur_digestsize;
+
+ if (!event_data->modsig)
+ return 0;
+
+ if (event_data->violation) {
+ /* Recording a violation. */
+ hash_algo = HASH_ALGO_SHA1;
+ cur_digest = NULL;
+ cur_digestsize = 0;
+ } else {
+ int rc;
+
+ rc = ima_get_modsig_digest(event_data->modsig, &hash_algo,
+ &cur_digest, &cur_digestsize);
+ if (rc)
+ return rc;
+ else if (hash_algo == HASH_ALGO__LAST || cur_digestsize == 0)
+ /* There was some error collecting the digest. */
+ return -EINVAL;
+ }
+
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ DIGEST_TYPE__LAST, hash_algo,
+ field_data);
+}
+
+static int ima_eventname_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ struct name_snapshot filename;
+ u32 cur_filename_len = 0;
+ bool snapshot = false;
+ int ret;
+
+ BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+
+ if (event_data->filename) {
+ cur_filename = event_data->filename;
+ cur_filename_len = strlen(event_data->filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (event_data->file) {
+ take_dentry_name_snapshot(&filename,
+ event_data->file->f_path.dentry);
+ snapshot = true;
+ cur_filename = filename.name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate the filename if it is too long and the
+ * file descriptor is not available.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ ret = ima_write_template_field_data(cur_filename, cur_filename_len,
+ DATA_FMT_STRING, field_data);
+
+ if (snapshot)
+ release_dentry_name_snapshot(&filename);
+
+ return ret;
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(event_data, field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(event_data, field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct evm_ima_xattr_data *xattr_value = event_data->xattr_value;
+
+ if (!xattr_value ||
+ (xattr_value->type != EVM_IMA_XATTR_DIGSIG &&
+ xattr_value->type != IMA_VERITY_DIGSIG))
+ return ima_eventevmsig_init(event_data, field_data);
+
+ return ima_write_template_field_data(xattr_value, event_data->xattr_len,
+ DATA_FMT_HEX, field_data);
+}
+
+/*
+ * ima_eventbuf_init - include the buffer (e.g. the kexec command line) as
+ * part of the template data.
+ */
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ if ((!event_data->buf) || (event_data->buf_len == 0))
+ return 0;
+
+ return ima_write_template_field_data(event_data->buf,
+ event_data->buf_len, DATA_FMT_HEX,
+ field_data);
+}
+
+/*
+ * ima_eventmodsig_init - include the appended file signature as part of the
+ * template data
+ */
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ const void *data;
+ u32 data_len;
+ int rc;
+
+ if (!event_data->modsig)
+ return 0;
+
+ /*
+ * modsig is a runtime structure containing pointers. Get its raw data
+ * instead.
+ */
+ rc = ima_get_raw_modsig(event_data->modsig, &data, &data_len);
+ if (rc)
+ return rc;
+
+ return ima_write_template_field_data(data, data_len, DATA_FMT_HEX,
+ field_data);
+}
+
+/*
+ * ima_eventevmsig_init - include the EVM portable signature as part of the
+ * template data
+ */
+int ima_eventevmsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct evm_ima_xattr_data *xattr_data = NULL;
+ int rc = 0;
+
+ if (!event_data->file)
+ return 0;
+
+ rc = vfs_getxattr_alloc(&nop_mnt_idmap, file_dentry(event_data->file),
+ XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0 || xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = ima_write_template_field_data((char *)xattr_data, rc, DATA_FMT_HEX,
+ field_data);
+
+out:
+ kfree(xattr_data);
+ return rc;
+}
+
+static int ima_eventinodedac_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ bool get_uid)
+{
+ unsigned int id;
+
+ if (!event_data->file)
+ return 0;
+
+ if (get_uid)
+ id = i_uid_read(file_inode(event_data->file));
+ else
+ id = i_gid_read(file_inode(event_data->file));
+
+ if (ima_canonical_fmt) {
+ if (sizeof(id) == sizeof(u16))
+ id = (__force u16)cpu_to_le16(id);
+ else
+ id = (__force u32)cpu_to_le32(id);
+ }
+
+ return ima_write_template_field_data((void *)&id, sizeof(id),
+ DATA_FMT_UINT, field_data);
+}
+
+/*
+ * ima_eventinodeuid_init - include the inode UID as part of the template
+ * data
+ */
+int ima_eventinodeuid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodedac_init_common(event_data, field_data, true);
+}
+
+/*
+ * ima_eventinodegid_init - include the inode GID as part of the template
+ * data
+ */
+int ima_eventinodegid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodedac_init_common(event_data, field_data, false);
+}
+
+/*
+ * ima_eventinodemode_init - include the inode mode as part of the template
+ * data
+ */
+int ima_eventinodemode_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct inode *inode;
+ u16 mode;
+
+ if (!event_data->file)
+ return 0;
+
+ inode = file_inode(event_data->file);
+ mode = inode->i_mode;
+ if (ima_canonical_fmt)
+ mode = (__force u16)cpu_to_le16(mode);
+
+ return ima_write_template_field_data((char *)&mode, sizeof(mode),
+ DATA_FMT_UINT, field_data);
+}
+
+static int ima_eventinodexattrs_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ char type)
+{
+ u8 *buffer = NULL;
+ int rc;
+
+ if (!event_data->file)
+ return 0;
+
+ rc = evm_read_protected_xattrs(file_dentry(event_data->file), NULL, 0,
+ type, ima_canonical_fmt);
+ if (rc < 0)
+ return 0;
+
+ buffer = kmalloc(rc, GFP_KERNEL);
+ if (!buffer)
+ return 0;
+
+ rc = evm_read_protected_xattrs(file_dentry(event_data->file), buffer,
+ rc, type, ima_canonical_fmt);
+ if (rc < 0) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = ima_write_template_field_data((char *)buffer, rc, DATA_FMT_HEX,
+ field_data);
+out:
+ kfree(buffer);
+ return rc;
+}
+
+/*
+ * ima_eventinodexattrnames_init - include a list of xattr names as part of the
+ * template data
+ */
+int ima_eventinodexattrnames_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'n');
+}
+
+/*
+ * ima_eventinodexattrlengths_init - include a list of xattr lengths as part of
+ * the template data
+ */
+int ima_eventinodexattrlengths_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'l');
+}
+
+/*
+ * ima_eventinodexattrvalues_init - include a list of xattr values as part of
+ * the template data
+ */
+int ima_eventinodexattrvalues_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventinodexattrs_init_common(event_data, field_data, 'v');
+}
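
The init/show pairs defined above are consumed through the template field descriptors declared in ima.h. The sketch below (illustrative only, not part of the patch) shows how such a pairing is typically expressed; the struct layout is an assumption based on the rest of the IMA code, and the entries are examples rather than the supported-fields table added by this patch.

/*
 * Illustrative sketch only (not part of the patch): wiring an init/show
 * pair into a template field descriptor. The struct layout is assumed
 * from ima.h (a field id plus init/show callbacks).
 */
static const struct ima_template_field example_fields[] = {
	{ .field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
	  .field_show = ima_show_template_digest_ng },
	{ .field_id = "n-ng", .field_init = ima_eventname_ng_init,
	  .field_show = ima_show_template_string },
	{ .field_id = "buf", .field_init = ima_eventbuf_init,
	  .field_show = ima_show_template_buf },
};
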
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 000000000000..9f7c335f304f
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+#define ENFORCE_FIELDS 0x00000001
+#define ENFORCE_BUFEND 0x00000002
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ngv2(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_buf(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_uint(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+ int maxfields, struct ima_field_data *fields, int *curfields,
+ unsigned long *len_mask, int enforce_mask, char *bufname);
+int ima_eventdigest_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ngv2_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_modsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventbuf_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventmodsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventevmsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodeuid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodegid_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodemode_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrnames_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrlengths_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventinodexattrvalues_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */