// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"
/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
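/*
 * Illustrative usage (a sketch, not taken from this file): with IMA built
 * into the kernel, the parameter can be given on the command line as
 * "ima.ahash_minsize=4096", or set at runtime through the 0644 sysfs node:
 *
 *	echo 4096 > /sys/module/ima/parameters/ahash_minsize
 *
 * Files whose size is at least this value are then hashed through the
 * ahash (potentially HW-accelerated) path below.
 */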
/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;
static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}
static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
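/*
 * Example (a sketch under the same assumptions as above): memparse()
 * accepts K/M/G suffixes, and get_order() rounds the value up to a
 * power-of-two number of pages, so with 4 KiB pages
 *
 *	echo 64K > /sys/module/ima/parameters/ahash_bufsize
 *
 * sets ima_maxorder to 4 and ima_bufsize to 65536.
 */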
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;
int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}
/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Should the min_size allocation warn or not.
 *
 * Tries an opportunistic allocation: first attempt to allocate max_size
 * worth of memory, then fall back through successively smaller orders until
 * order zero is reached. The allocations are attempted without generating
 * allocation warnings; last_warn only affects the final, zero-order
 * allocation.
 *
 * By default, ima_maxorder is 0 and the function is equivalent to
 * kmalloc(GFP_KERNEL).
 *
 * Return: pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */
	gfp_mask = GFP_KERNEL;
	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}
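/*
 * Worked example for ima_alloc_pages() (hypothetical values): with
 * ima_maxorder set to 2 and a max_size of five pages, get_order() yields 3,
 * so the loop starts at min(3, 2) = 2 and silently tries a 4-page, then a
 * 2-page allocation; only the final single-page GFP_KERNEL attempt may
 * warn, and only when last_warn is set.
 */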
/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}
static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}
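/*
 * Note on the helper below: crypto_wait_req() turns the asynchronous
 * completion model into a synchronous wait. -EINPROGRESS and -EBUSY mean
 * the request was queued, so it sleeps on the crypto_wait completion and
 * returns the final request status; any other value is passed through.
 */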
static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate a secondary buffer. If that fails, fall
		 * back to using single buffering. Use the previous memory
		 * allocation size as the baseline for the possible
		 * allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with the
			 * return value from ahash_wait().
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}
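/*
 * Sketch of the double-buffered pipeline above, when the second buffer
 * could be allocated:
 *
 *	read rbuf[0]; start update(rbuf[0])
 *	read rbuf[1] (overlaps the in-flight update)
 *	wait for update(rbuf[0]); start update(rbuf[1])
 *	read rbuf[0] ...
 *
 * With a single buffer, the wait instead precedes every read, serializing
 * file I/O and hashing.
 */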
static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}
/**
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false, modified_flags = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);

		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f)) {
			/*
			 * Cannot open the file again, let's modify f_flags
			 * of the original and continue
			 */
			pr_info_ratelimited("Unable to reopen file for reading.\n");
			f = file;
			f->f_flags |= FMODE_READ;
			modified_flags = true;
		} else {
			new_file_instance = true;
		}
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	else if (modified_flags)
		f->f_flags &= ~FMODE_READ;
	return rc;
}
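/*
 * Minimal caller sketch (hypothetical, modeled on ima_collect_measurement()
 * in ima_api.c): the digest buffer must directly follow the header, since
 * the result is written at hash->digest:
 *
 *	struct {
 *		struct ima_digest_data hdr;
 *		char digest[IMA_MAX_DIGEST_SIZE];
 *	} hash;
 *
 *	hash.hdr.algo = ima_hash_algo;
 *	rc = ima_calc_file_hash(file, &hash.hdr);
 */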
/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						 (const u8 *) &datalen_to_hash,
						 sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}

		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}
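/*
 * Example of the byte stream hashed above for a two-field "ima-ng" entry
 * (for any template other than the legacy "ima" one, each field's length
 * is hashed first, converted to little endian when ima_canonical_fmt is
 * set):
 *
 *	len(d-ng) || d-ng || len(n-ng) || n-ng
 *
 * For the legacy "ima" template no lengths are hashed, and the "n" field
 * is zero-padded to IMA_EVENT_NAME_LEN_MAX + 1 bytes.
 */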
int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}
static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}
int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}
static void __init ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}
/*
 * Calculate the boot aggregate hash
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = TPM_ALG_SHA1, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest, TPM_DIGEST_SIZE);
	}
	if (!rc)
		crypto_shash_final(shash, digest);
	return rc;
}
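/*
 * In effect the aggregate computed above is
 *
 *	boot_aggregate = H(PCR0 || PCR1 || ... || PCR7)
 *
 * where H is the hash algorithm backing @tfm and each PCR value is the
 * TPM_DIGEST_SIZE (20-byte) SHA1 bank value; on a read failure, d.digest
 * keeps its previous (initially zeroed) contents.
 */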
int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);

	ima_free_tfm(tfm);

	return rc;
}