Diffstat (limited to 'security/integrity/ima/ima_crypto.c')
-rw-r--r--  security/integrity/ima/ima_crypto.c  419
1 file changed, 301 insertions(+), 118 deletions(-)
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 802d5d20f36f..6f5696d999d0 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
@@ -5,16 +6,10 @@
* Mimi Zohar <zohar@us.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, version 2 of the License.
- *
* File: ima_crypto.c
* Calculates md5/sha1 file hash, template hash, boot-aggregate hash
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
@@ -27,11 +22,6 @@
#include "ima.h"
-struct ahash_completion {
- struct completion completion;
- int err;
-};
-
/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
@@ -48,7 +38,7 @@ static int param_set_bufsize(const char *val, const struct kernel_param *kp)
size = memparse(val, NULL);
order = get_order(size);
- if (order >= MAX_ORDER)
+ if (order > MAX_PAGE_ORDER)
return -EINVAL;
ima_maxorder = order;
ima_bufsize = PAGE_SIZE << order;
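/*
 * A stand-alone sketch (not from the patch; 4 KiB pages assumed) of
 * the rounding performed above: memparse() turns a human-readable
 * size such as "33K" into bytes, and get_order() rounds that up to a
 * power-of-two number of pages, so ima_bufsize always ends up a
 * power-of-two multiple of PAGE_SIZE.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* user-space stand-in for the kernel's get_order() */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 33 * 1024;	/* memparse("33K", NULL) */
	int order = get_order(size);	/* 9 pages round up to 16: order 4 */

	printf("order=%d bufsize=%lu\n", order, PAGE_SIZE << order);
	return 0;
}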
@@ -67,7 +57,17 @@ MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;
-int __init ima_init_crypto(void)
+int ima_sha1_idx __ro_after_init;
+int ima_hash_algo_idx __ro_after_init;
+/*
+ * Additional number of slots reserved, as needed, for SHA1
+ * and IMA default algo.
+ */
+int ima_extra_slots __ro_after_init;
+
+struct ima_algo_desc *ima_algo_array __ro_after_init;
+
+static int __init ima_init_ima_crypto(void)
{
long rc;
@@ -78,32 +78,146 @@ int __init ima_init_crypto(void)
hash_algo_name[ima_hash_algo], rc);
return rc;
}
+ pr_info("Allocated hash algorithm: %s\n",
+ hash_algo_name[ima_hash_algo]);
return 0;
}
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
struct crypto_shash *tfm = ima_shash_tfm;
- int rc;
+ int rc, i;
if (algo < 0 || algo >= HASH_ALGO__LAST)
algo = ima_hash_algo;
- if (algo != ima_hash_algo) {
- tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
- if (IS_ERR(tfm)) {
- rc = PTR_ERR(tfm);
- pr_err("Can not allocate %s (reason: %d)\n",
- hash_algo_name[algo], rc);
- }
+ if (algo == ima_hash_algo)
+ return tfm;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
+ return ima_algo_array[i].tfm;
+
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
}
return tfm;
}
+int __init ima_init_crypto(void)
+{
+ enum hash_algo algo;
+ long rc;
+ int i;
+
+ rc = ima_init_ima_crypto();
+ if (rc)
+ return rc;
+
+ ima_sha1_idx = -1;
+ ima_hash_algo_idx = -1;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (algo == HASH_ALGO_SHA1)
+ ima_sha1_idx = i;
+
+ if (algo == ima_hash_algo)
+ ima_hash_algo_idx = i;
+ }
+
+ if (ima_sha1_idx < 0) {
+ ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+ if (ima_hash_algo == HASH_ALGO_SHA1)
+ ima_hash_algo_idx = ima_sha1_idx;
+ }
+
+ if (ima_hash_algo_idx < 0)
+ ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
+
+ ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
+ sizeof(*ima_algo_array), GFP_KERNEL);
+ if (!ima_algo_array) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
+ algo = ima_tpm_chip->allocated_banks[i].crypto_id;
+ ima_algo_array[i].algo = algo;
+
+ /* unknown TPM algorithm */
+ if (algo == HASH_ALGO__LAST)
+ continue;
+
+ if (algo == ima_hash_algo) {
+ ima_algo_array[i].tfm = ima_shash_tfm;
+ continue;
+ }
+
+ ima_algo_array[i].tfm = ima_alloc_tfm(algo);
+ if (IS_ERR(ima_algo_array[i].tfm)) {
+ if (algo == HASH_ALGO_SHA1) {
+ rc = PTR_ERR(ima_algo_array[i].tfm);
+ ima_algo_array[i].tfm = NULL;
+ goto out_array;
+ }
+
+ ima_algo_array[i].tfm = NULL;
+ }
+ }
+
+ if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
+ if (ima_hash_algo == HASH_ALGO_SHA1) {
+ ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
+ } else {
+ ima_algo_array[ima_sha1_idx].tfm =
+ ima_alloc_tfm(HASH_ALGO_SHA1);
+ if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
+ rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
+ goto out_array;
+ }
+ }
+
+ ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
+ }
+
+ if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
+ ima_hash_algo_idx != ima_sha1_idx) {
+ ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
+ ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
+ }
+
+ return 0;
+out_array:
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (!ima_algo_array[i].tfm ||
+ ima_algo_array[i].tfm == ima_shash_tfm)
+ continue;
+
+ crypto_free_shash(ima_algo_array[i].tfm);
+ }
+ kfree(ima_algo_array);
+out:
+ crypto_free_shash(ima_shash_tfm);
+ return rc;
+}
+
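/*
 * A worked example of the slot layout built by ima_init_crypto()
 * above, under an assumed configuration (not taken from the patch):
 * a TPM with two allocated banks, SHA1 and SHA256, and
 * ima_hash_algo = HASH_ALGO_SHA512.
 *
 *   slot 0: SHA1   (TPM bank 0)   -> ima_sha1_idx = 0
 *   slot 1: SHA256 (TPM bank 1)
 *   slot 2: SHA512 (extra slot)   -> ima_hash_algo_idx = 2
 *
 * NR_BANKS() is 2 and ima_extra_slots becomes 1, so ima_algo_array
 * holds three entries; slot 2 reuses ima_shash_tfm rather than
 * allocating a second SHA512 tfm.
 */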
static void ima_free_tfm(struct crypto_shash *tfm)
{
- if (tfm != ima_shash_tfm)
- crypto_free_shash(tfm);
+ int i;
+
+ if (tfm == ima_shash_tfm)
+ return;
+
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
+ if (ima_algo_array[i].tfm == tfm)
+ return;
+
+ crypto_free_shash(tfm);
}
/**
@@ -196,30 +310,13 @@ static void ima_free_atfm(struct crypto_ahash *tfm)
crypto_free_ahash(tfm);
}
-static void ahash_complete(struct crypto_async_request *req, int err)
+static inline int ahash_wait(int err, struct crypto_wait *wait)
{
- struct ahash_completion *res = req->data;
- if (err == -EINPROGRESS)
- return;
- res->err = err;
- complete(&res->completion);
-}
+ err = crypto_wait_req(err, wait);
-static int ahash_wait(int err, struct ahash_completion *res)
-{
- switch (err) {
- case 0:
- break;
- case -EINPROGRESS:
- case -EBUSY:
- wait_for_completion(&res->completion);
- reinit_completion(&res->completion);
- err = res->err;
- /* fall through */
- default:
+ if (err)
pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
- }
return err;
}
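/*
 * A minimal sketch (kernel context assumed; not part of the patch)
 * of the crypto_wait pattern the new ahash_wait() builds on:
 * crypto_req_done() is the library completion callback, and
 * crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY until it fires,
 * returning the request's final status; other errors pass through.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>

static int example_ahash_digest(struct ahash_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	return crypto_wait_req(crypto_ahash_digest(req), &wait);
}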
@@ -230,10 +327,10 @@ static int ima_calc_file_hash_atfm(struct file *file,
{
loff_t i_size, offset;
char *rbuf[2] = { NULL, };
- int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
+ int rc, rbuf_len, active = 0, ahash_rc = 0;
struct ahash_request *req;
struct scatterlist sg[1];
- struct ahash_completion res;
+ struct crypto_wait wait;
size_t rbuf_size[2];
hash->length = crypto_ahash_digestsize(tfm);
@@ -242,12 +339,12 @@ static int ima_calc_file_hash_atfm(struct file *file,
if (!req)
return -ENOMEM;
- init_completion(&res.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- ahash_complete, &res);
+ crypto_req_done, &wait);
- rc = ahash_wait(crypto_ahash_init(req), &res);
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
if (rc)
goto out1;
@@ -277,18 +374,13 @@ static int ima_calc_file_hash_atfm(struct file *file,
&rbuf_size[1], 0);
}
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
- }
-
for (offset = 0; offset < i_size; offset += rbuf_len) {
if (!rbuf[1] && offset) {
/* Not using two buffers, and it is not the first
* read/request, wait for the completion of the
* previous ahash_update() request.
*/
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
if (rc)
goto out3;
}
@@ -296,15 +388,23 @@ static int ima_calc_file_hash_atfm(struct file *file,
rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
rc = integrity_kernel_read(file, offset, rbuf[active],
rbuf_len);
- if (rc != rbuf_len)
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
+ /*
+ * Forward current rc, do not overwrite with return value
+ * from ahash_wait()
+ */
+ ahash_wait(ahash_rc, &wait);
goto out3;
+ }
if (rbuf[1] && offset) {
/* Using two buffers, and it is not the first
* read/request, wait for the completion of the
* previous ahash_update() request.
*/
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
if (rc)
goto out3;
}
@@ -318,16 +418,14 @@ static int ima_calc_file_hash_atfm(struct file *file,
active = !active; /* swap buffers, if we use two */
}
/* wait for the last update request to complete */
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
out3:
- if (read)
- file->f_mode &= ~FMODE_READ;
ima_free_pages(rbuf[0], rbuf_size[0]);
ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
if (!rc) {
ahash_request_set_crypt(req, NULL, hash->digest, 0);
- rc = ahash_wait(crypto_ahash_final(req), &res);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
}
out1:
ahash_request_free(req);
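/*
 * Timeline sketch of the two-buffer pipeline above (illustrative
 * only): with rbuf[1] allocated, integrity_kernel_read() of chunk
 * N+1 overlaps the still-asynchronous ahash_update() of chunk N,
 * and the wait happens only just before the buffer is handed back
 * to the crypto engine:
 *
 *   read chunk 0 -> update(rbuf[0]) async
 *   read chunk 1 -> wait(update 0) -> update(rbuf[1]) async
 *   read chunk 2 -> wait(update 1) -> update(rbuf[0]) async ...
 *
 * With a single buffer, the wait must instead happen before each
 * read, since the read would otherwise overwrite data the engine
 * may still be hashing.
 */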
@@ -356,11 +454,10 @@ static int ima_calc_file_hash_tfm(struct file *file,
{
loff_t i_size, offset = 0;
char *rbuf;
- int rc, read = 0;
+ int rc;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = 0;
hash->length = crypto_shash_digestsize(tfm);
@@ -377,11 +474,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
if (!rbuf)
return -ENOMEM;
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
- }
-
while (offset < i_size) {
int rbuf_len;
@@ -390,16 +482,16 @@ static int ima_calc_file_hash_tfm(struct file *file,
rc = rbuf_len;
break;
}
- if (rbuf_len == 0)
+ if (rbuf_len == 0) { /* unexpected EOF */
+ rc = -EINVAL;
break;
+ }
offset += rbuf_len;
rc = crypto_shash_update(shash, rbuf, rbuf_len);
if (rc)
break;
}
- if (read)
- file->f_mode &= ~FMODE_READ;
kfree(rbuf);
out:
if (!rc)
@@ -440,34 +532,59 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
loff_t i_size;
int rc;
+ struct file *f = file;
+ bool new_file_instance = false;
- i_size = i_size_read(file_inode(file));
+ /*
+ * For consistency, fail files opened with the O_DIRECT flag on
+ * filesystems mounted with/without DAX option.
+ */
+ if (file->f_flags & O_DIRECT) {
+ hash->length = hash_digest_size[ima_hash_algo];
+ hash->algo = ima_hash_algo;
+ return -EINVAL;
+ }
+
+ /* Open a new file instance in O_RDONLY if we cannot read */
+ if (!(file->f_mode & FMODE_READ)) {
+ int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
+ O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
+ flags |= O_RDONLY;
+ f = dentry_open(&file->f_path, flags, file->f_cred);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ new_file_instance = true;
+ }
+
+ i_size = i_size_read(file_inode(f));
if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
- rc = ima_calc_file_ahash(file, hash);
+ rc = ima_calc_file_ahash(f, hash);
if (!rc)
- return 0;
+ goto out;
}
- return ima_calc_file_shash(file, hash);
+ rc = ima_calc_file_shash(f, hash);
+out:
+ if (new_file_instance)
+ fput(f);
+ return rc;
}
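/*
 * The pattern above, extracted into a stand-alone helper for
 * clarity (hypothetical name; kernel context, error handling as in
 * the patch). Unlike the old code, which temporarily set FMODE_READ
 * on a struct file it did not own, dentry_open() yields a private
 * read-only instance on the same path with the original opener's
 * credentials, so no shared state is mutated.
 */
#include <linux/file.h>
#include <linux/fs.h>

static struct file *ima_reopen_readonly(struct file *file)
{
	int flags = file->f_flags & ~(O_WRONLY | O_APPEND | O_TRUNC |
				      O_CREAT | O_NOCTTY | O_EXCL);

	return dentry_open(&file->f_path, flags | O_RDONLY,
			   file->f_cred);
}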
/*
* Calculate the hash of template data
*/
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
- struct ima_template_desc *td,
- int num_fields,
- struct ima_digest_data *hash,
- struct crypto_shash *tfm)
+ struct ima_template_entry *entry,
+ int tfm_idx)
{
- SHASH_DESC_ON_STACK(shash, tfm);
+ SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
+ struct ima_template_desc *td = entry->template_desc;
+ int num_fields = entry->template_desc->num_fields;
int rc, i;
- shash->tfm = tfm;
- shash->flags = 0;
-
- hash->length = crypto_shash_digestsize(tfm);
+ shash->tfm = ima_algo_array[tfm_idx].tfm;
rc = crypto_shash_init(shash);
if (rc != 0)
@@ -477,8 +594,8 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
u8 *data_to_hash = field_data[i].data;
u32 datalen = field_data[i].len;
- u32 datalen_to_hash =
- !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
+ u32 datalen_to_hash = !ima_canonical_fmt ?
+ datalen : (__force u32)cpu_to_le32(datalen);
if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
rc = crypto_shash_update(shash,
@@ -497,27 +614,44 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
}
if (!rc)
- rc = crypto_shash_final(shash, hash->digest);
+ rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);
return rc;
}
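/*
 * User-space sketch (not from the patch) of the canonical-format
 * length handling above: with ima_canonical_fmt set, field lengths
 * are hashed in little-endian byte order so that big- and
 * little-endian machines produce identical template digests.
 * to_le32() stands in for the kernel's cpu_to_le32().
 */
#include <stdint.h>
#include <string.h>

static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
			 (v >> 16) & 0xff, (v >> 24) & 0xff };
	uint32_t out;

	memcpy(&out, b, sizeof(out));	/* little-endian on any host */
	return out;
}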
int ima_calc_field_array_hash(struct ima_field_data *field_data,
- struct ima_template_desc *desc, int num_fields,
- struct ima_digest_data *hash)
+ struct ima_template_entry *entry)
{
- struct crypto_shash *tfm;
- int rc;
+ u16 alg_id;
+ int rc, i;
- tfm = ima_alloc_tfm(hash->algo);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
+ if (rc)
+ return rc;
- rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
- hash, tfm);
+ entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;
- ima_free_tfm(tfm);
+ for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
+ if (i == ima_sha1_idx)
+ continue;
+
+ if (i < NR_BANKS(ima_tpm_chip)) {
+ alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
+ entry->digests[i].alg_id = alg_id;
+ }
+
+ /* for unmapped TPM algorithms, the digest is still a padded SHA1 */
+ if (!ima_algo_array[i].tfm) {
+ memcpy(entry->digests[i].digest,
+ entry->digests[ima_sha1_idx].digest,
+ TPM_DIGEST_SIZE);
+ continue;
+ }
+ rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
+ if (rc)
+ return rc;
+ }
return rc;
}
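/*
 * Example of the padding above, under assumed bank sizes: for a
 * 32-byte bank whose algorithm has no kernel tfm, the 20-byte SHA1
 * template digest is copied into the first TPM_DIGEST_SIZE bytes of
 * the bank's digest and the remaining 12 bytes stay zero, i.e. the
 * bank is extended with a zero-padded SHA1 rather than left empty.
 */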
@@ -527,7 +661,7 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
{
struct ahash_request *req;
struct scatterlist sg;
- struct ahash_completion res;
+ struct crypto_wait wait;
int rc, ahash_rc = 0;
hash->length = crypto_ahash_digestsize(tfm);
@@ -536,12 +670,12 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
if (!req)
return -ENOMEM;
- init_completion(&res.completion);
+ crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
- ahash_complete, &res);
+ crypto_req_done, &wait);
- rc = ahash_wait(crypto_ahash_init(req), &res);
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
if (rc)
goto out;
@@ -551,10 +685,10 @@ static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
ahash_rc = crypto_ahash_update(req);
/* wait for the update request to complete */
- rc = ahash_wait(ahash_rc, &res);
+ rc = ahash_wait(ahash_rc, &wait);
if (!rc) {
ahash_request_set_crypt(req, NULL, hash->digest, 0);
- rc = ahash_wait(crypto_ahash_final(req), &res);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
}
out:
ahash_request_free(req);
@@ -587,7 +721,6 @@ static int calc_buffer_shash_tfm(const void *buf, loff_t size,
int rc;
shash->tfm = tfm;
- shash->flags = 0;
hash->length = crypto_shash_digestsize(tfm);
@@ -639,54 +772,104 @@ int ima_calc_buffer_hash(const void *buf, loff_t len,
return calc_buffer_shash(buf, len, hash);
}
-static void __init ima_pcrread(int idx, u8 *pcr)
+static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
- if (!ima_used_chip)
+ if (!ima_tpm_chip)
return;
- if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
+ if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
pr_err("Error Communicating to TPM chip\n");
}
/*
- * Calculate the boot aggregate hash
+ * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
+ * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
+ * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
+ * allowing firmware to configure and enable different banks.
+ *
+ * Knowing which TPM bank is read to calculate the boot_aggregate digest
+ * needs to be conveyed to a verifier. For this reason, use the same
+ * hash algorithm for reading the TPM PCRs as for calculating the boot
+ * aggregate digest as stored in the measurement list.
*/
-static int __init ima_calc_boot_aggregate_tfm(char *digest,
- struct crypto_shash *tfm)
+static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
+ struct crypto_shash *tfm)
{
- u8 pcr_i[TPM_DIGEST_SIZE];
- int rc, i;
+ struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
+ int rc;
+ u32 i;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = 0;
+
+ pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
+ d.alg_id);
rc = crypto_shash_init(shash);
if (rc != 0)
return rc;
- /* cumulative sha1 over tpm registers 0-7 */
+ /* cumulative digest over TPM registers 0-7 */
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
- ima_pcrread(i, pcr_i);
+ ima_pcrread(i, &d);
/* now accumulate with current aggregate */
- rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ if (rc != 0)
+ return rc;
+ }
+ /*
+ * Extend cumulative digest over TPM registers 8-9, which contain
+ * measurement for the kernel command line (reg. 8) and image (reg. 9)
+ * in a typical PCR allocation. Registers 8-9 are only included in
+ * non-SHA1 boot_aggregate digests to avoid ambiguity.
+ */
+ if (alg_id != TPM_ALG_SHA1) {
+ for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+ ima_pcrread(i, &d);
+ rc = crypto_shash_update(shash, d.digest,
+ crypto_shash_digestsize(tfm));
+ }
}
if (!rc)
crypto_shash_final(shash, digest);
return rc;
}
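/*
 * A hedged user-space sketch (OpenSSL assumed available; not part
 * of the patch) of a verifier recomputing a SHA-256 boot_aggregate
 * from quoted PCR values, mirroring the loop above: PCRs 0-7
 * always, PCRs 8-9 only for non-SHA1 banks.
 */
#include <openssl/evp.h>

static int boot_aggregate_sha256(const unsigned char pcrs[10][32],
				 unsigned char digest[32])
{
	EVP_MD_CTX *ctx = EVP_MD_CTX_new();
	int i, ok = ctx && EVP_DigestInit_ex(ctx, EVP_sha256(), NULL);

	for (i = 0; ok && i < 10; i++)	/* 0-7 plus 8-9 (non-SHA1) */
		ok = EVP_DigestUpdate(ctx, pcrs[i], 32);
	if (ok)
		ok = EVP_DigestFinal_ex(ctx, digest, NULL);
	EVP_MD_CTX_free(ctx);
	return ok ? 0 : -1;
}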
-int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
struct crypto_shash *tfm;
- int rc;
+ u16 crypto_id, alg_id;
+ int rc, i, bank_idx = -1;
+
+ for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
+ crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
+ if (crypto_id == hash->algo) {
+ bank_idx = i;
+ break;
+ }
+
+ if (crypto_id == HASH_ALGO_SHA256)
+ bank_idx = i;
+
+ if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
+ bank_idx = i;
+ }
+
+ if (bank_idx == -1) {
+ pr_err("No suitable TPM algorithm for boot aggregate\n");
+ return 0;
+ }
+
+ hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;
tfm = ima_alloc_tfm(hash->algo);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
hash->length = crypto_shash_digestsize(tfm);
- rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+ alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);
ima_free_tfm(tfm);
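/*
 * Selection example for the fallback logic above (assumed banks,
 * not from the patch): with allocated banks { SHA1, SHA384 } and
 * hash->algo = HASH_ALGO_SHA256, no bank matches the requested
 * algorithm and no SHA256 bank exists, so bank_idx settles on the
 * SHA1 bank and hash->algo is rewritten to HASH_ALGO_SHA1 before
 * the aggregate is computed. A SHA256 bank, when present, is
 * preferred over SHA1 even if SHA1 appears first in the list.
 */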