/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Test cases for hash functions, including a benchmark. This is included by
* KUnit test suites that want to use it. See sha512_kunit.c for an example.
*
* Copyright 2025 Google LLC
*/
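/*
 * Rough sketch of how a suite instantiates this template, using hypothetical
 * "myhash" names (the real suites look similar; see sha512_kunit.c). The
 * suite defines the HASH* macros, pulls in its generated test vectors, then
 * includes this file and lists benchmark_hash last:
 *
 *	#define HASH_SIZE	MYHASH_DIGEST_SIZE
 *	#define HASH_CTX	myhash_ctx		// used as 'struct HASH_CTX'
 *	#define HASH_INIT	myhash_init
 *	#define HASH_UPDATE	myhash_update
 *	#define HASH_FINAL	myhash_final
 *	#define HASH		myhash			// one-shot myhash(data, len, out)
 *	#include "myhash-testvecs.h"	// hash_testvecs, hash_testvec_consolidated
 *	#include "hash-test-template.h"
 *
 *	static struct kunit_case myhash_test_cases[] = {
 *		HASH_KUNIT_CASES,
 *		KUNIT_CASE(benchmark_hash),
 *		{},
 *	};
 */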
#include <kunit/test.h>
#include <linux/hrtimer.h>
#include <linux/timekeeping.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
/* test_buf is a guarded buffer, i.e. &test_buf[TEST_BUF_LEN] is not mapped. */
#define TEST_BUF_LEN 16384
static u8 *test_buf;
static u8 *orig_test_buf;
static u64 random_seed;
/*
* This is a simple linear congruential generator. It is used only for testing,
* which does not require cryptographically secure random numbers. A hard-coded
* algorithm is used instead of <linux/prandom.h> so that it matches the
* algorithm used by the test vector generation script. This allows the input
* data in random test vectors to be concisely stored as just the seed.
*/
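/*
 * (For reference, these are the java.util.Random constants: multiplier
 * 0x5DEECE66D, increment 11, modulus 2^48, with the 32-bit output taken from
 * bits 47..16 of the state.)
 */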
static u32 rand32(void)
{
random_seed = (random_seed * 25214903917 + 11) & ((1ULL << 48) - 1);
return random_seed >> 16;
}
static void rand_bytes(u8 *out, size_t len)
{
for (size_t i = 0; i < len; i++)
out[i] = rand32();
}
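/*
 * Fill a buffer with random bytes that depend only on the buffer length, so
 * that the data for a given length can be regenerated from the length alone.
 */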
static void rand_bytes_seeded_from_len(u8 *out, size_t len)
{
random_seed = len;
rand_bytes(out, len);
}
static bool rand_bool(void)
{
return rand32() % 2;
}
/* Generate a random length, preferring small lengths. */
static size_t rand_length(size_t max_len)
{
size_t len;
switch (rand32() % 3) {
case 0:
len = rand32() % 128;
break;
case 1:
len = rand32() % 3072;
break;
default:
len = rand32();
break;
}
return len % (max_len + 1);
}
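/* Generate a random offset, preferring small offsets. */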
static size_t rand_offset(size_t max_offset)
{
return min(rand32() % 128, max_offset);
}
static int hash_suite_init(struct kunit_suite *suite)
{
/*
* Allocate the test buffer using vmalloc() with a page-aligned length
* so that it is immediately followed by a guard page. This allows
* buffer overreads to be detected, even in assembly code.
*/
size_t alloc_len = round_up(TEST_BUF_LEN, PAGE_SIZE);
orig_test_buf = vmalloc(alloc_len);
if (!orig_test_buf)
return -ENOMEM;
test_buf = orig_test_buf + alloc_len - TEST_BUF_LEN;
return 0;
}
static void hash_suite_exit(struct kunit_suite *suite)
{
vfree(orig_test_buf);
orig_test_buf = NULL;
test_buf = NULL;
}
/*
* Test the hash function against a list of test vectors.
*
* Note that it's only necessary to run each test vector in one way (e.g.,
* one-shot instead of incremental), since consistency between different ways of
* using the APIs is verified by other test cases.
*/
static void test_hash_test_vectors(struct kunit *test)
{
for (size_t i = 0; i < ARRAY_SIZE(hash_testvecs); i++) {
size_t data_len = hash_testvecs[i].data_len;
u8 actual_hash[HASH_SIZE];
KUNIT_ASSERT_LE(test, data_len, TEST_BUF_LEN);
rand_bytes_seeded_from_len(test_buf, data_len);
HASH(test_buf, data_len, actual_hash);
KUNIT_ASSERT_MEMEQ_MSG(
test, actual_hash, hash_testvecs[i].digest, HASH_SIZE,
"Wrong result with test vector %zu; data_len=%zu", i,
data_len);
}
}
/*
* Test that the hash function produces correct results for *every* length up to
* 4096 bytes. To do this, generate seeded random data, then calculate a hash
* value for each length 0..4096, then hash the hash values. Verify just the
* final hash value, which should match only when all hash values were correct.
*/
static void test_hash_all_lens_up_to_4096(struct kunit *test)
{
struct HASH_CTX ctx;
u8 hash[HASH_SIZE];
static_assert(TEST_BUF_LEN >= 4096);
rand_bytes_seeded_from_len(test_buf, 4096);
HASH_INIT(&ctx);
for (size_t len = 0; len <= 4096; len++) {
HASH(test_buf, len, hash);
HASH_UPDATE(&ctx, hash, HASH_SIZE);
}
HASH_FINAL(&ctx, hash);
KUNIT_ASSERT_MEMEQ(test, hash, hash_testvec_consolidated, HASH_SIZE);
}
/*
* Test that the hash function produces the same result with a one-shot
* computation as it does with an incremental computation.
*/
static void test_hash_incremental_updates(struct kunit *test)
{
for (int i = 0; i < 1000; i++) {
size_t total_len, offset;
struct HASH_CTX ctx;
u8 hash1[HASH_SIZE];
u8 hash2[HASH_SIZE];
size_t num_parts = 0;
size_t remaining_len, cur_offset;
total_len = rand_length(TEST_BUF_LEN);
offset = rand_offset(TEST_BUF_LEN - total_len);
rand_bytes(&test_buf[offset], total_len);
/* Compute the hash value in one shot. */
HASH(&test_buf[offset], total_len, hash1);
/*
* Compute the hash value incrementally, using a randomly
* selected sequence of update lengths that sum to total_len.
*/
HASH_INIT(&ctx);
remaining_len = total_len;
cur_offset = offset;
while (rand_bool()) {
size_t part_len = rand_length(remaining_len);
HASH_UPDATE(&ctx, &test_buf[cur_offset], part_len);
num_parts++;
cur_offset += part_len;
remaining_len -= part_len;
}
if (remaining_len != 0 || rand_bool()) {
HASH_UPDATE(&ctx, &test_buf[cur_offset], remaining_len);
num_parts++;
}
HASH_FINAL(&ctx, hash2);
/* Verify that the two hash values are the same. */
KUNIT_ASSERT_MEMEQ_MSG(
test, hash1, hash2, HASH_SIZE,
"Incremental test failed with total_len=%zu num_parts=%zu offset=%zu",
total_len, num_parts, offset);
}
}
/*
* Test that the hash function does not overrun any buffers. Uses a guard page
* to catch buffer overruns even if they occur in assembly code.
*/
static void test_hash_buffer_overruns(struct kunit *test)
{
const size_t max_tested_len = TEST_BUF_LEN - sizeof(struct HASH_CTX);
void *const buf_end = &test_buf[TEST_BUF_LEN];
struct HASH_CTX *guarded_ctx = buf_end - sizeof(*guarded_ctx);
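	/*
	 * Note: lengths are capped at max_tested_len so that data hashed into
	 * guarded_ctx never overlaps the context itself, which occupies the
	 * last sizeof(struct HASH_CTX) bytes of the buffer.
	 */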
rand_bytes(test_buf, TEST_BUF_LEN);
for (int i = 0; i < 100; i++) {
size_t len = rand_length(max_tested_len);
struct HASH_CTX ctx;
u8 hash[HASH_SIZE];
/* Check for overruns of the data buffer. */
HASH(buf_end - len, len, hash);
HASH_INIT(&ctx);
HASH_UPDATE(&ctx, buf_end - len, len);
HASH_FINAL(&ctx, hash);
/* Check for overruns of the hash value buffer. */
HASH(test_buf, len, buf_end - HASH_SIZE);
HASH_INIT(&ctx);
HASH_UPDATE(&ctx, test_buf, len);
HASH_FINAL(&ctx, buf_end - HASH_SIZE);
		/* Check for overruns of the hash context. */
HASH_INIT(guarded_ctx);
HASH_UPDATE(guarded_ctx, test_buf, len);
HASH_FINAL(guarded_ctx, hash);
}
}
/*
 * Test that the output digest is permitted to overlap the source data buffer,
 * and that the source data buffer may be modified after it has been passed to
 * an update call.
*/
static void test_hash_overlaps(struct kunit *test)
{
const size_t max_tested_len = TEST_BUF_LEN - HASH_SIZE;
struct HASH_CTX ctx;
u8 hash[HASH_SIZE];
rand_bytes(test_buf, TEST_BUF_LEN);
for (int i = 0; i < 100; i++) {
size_t len = rand_length(max_tested_len);
size_t offset = HASH_SIZE + rand_offset(max_tested_len - len);
bool left_end = rand_bool();
u8 *ovl_hash = left_end ? &test_buf[offset] :
&test_buf[offset + len - HASH_SIZE];
HASH(&test_buf[offset], len, hash);
HASH(&test_buf[offset], len, ovl_hash);
KUNIT_ASSERT_MEMEQ_MSG(
test, hash, ovl_hash, HASH_SIZE,
"Overlap test 1 failed with len=%zu offset=%zu left_end=%d",
len, offset, left_end);
/* Repeat the above test, but this time use init+update+final */
HASH(&test_buf[offset], len, hash);
HASH_INIT(&ctx);
HASH_UPDATE(&ctx, &test_buf[offset], len);
HASH_FINAL(&ctx, ovl_hash);
KUNIT_ASSERT_MEMEQ_MSG(
test, hash, ovl_hash, HASH_SIZE,
"Overlap test 2 failed with len=%zu offset=%zu left_end=%d",
len, offset, left_end);
/* Test modifying the source data after it was used. */
HASH(&test_buf[offset], len, hash);
HASH_INIT(&ctx);
HASH_UPDATE(&ctx, &test_buf[offset], len);
rand_bytes(&test_buf[offset], len);
HASH_FINAL(&ctx, ovl_hash);
KUNIT_ASSERT_MEMEQ_MSG(
test, hash, ovl_hash, HASH_SIZE,
"Overlap test 3 failed with len=%zu offset=%zu left_end=%d",
len, offset, left_end);
}
}
/*
* Test that if the same data is hashed at different alignments in memory, the
* results are the same.
*/
static void test_hash_alignment_consistency(struct kunit *test)
{
u8 hash1[128 + HASH_SIZE];
u8 hash2[128 + HASH_SIZE];
for (int i = 0; i < 100; i++) {
size_t len = rand_length(TEST_BUF_LEN);
size_t data_offs1 = rand_offset(TEST_BUF_LEN - len);
size_t data_offs2 = rand_offset(TEST_BUF_LEN - len);
size_t hash_offs1 = rand_offset(128);
size_t hash_offs2 = rand_offset(128);
rand_bytes(&test_buf[data_offs1], len);
HASH(&test_buf[data_offs1], len, &hash1[hash_offs1]);
memmove(&test_buf[data_offs2], &test_buf[data_offs1], len);
HASH(&test_buf[data_offs2], len, &hash2[hash_offs2]);
KUNIT_ASSERT_MEMEQ_MSG(
test, &hash1[hash_offs1], &hash2[hash_offs2], HASH_SIZE,
"Alignment consistency test failed with len=%zu data_offs=(%zu,%zu) hash_offs=(%zu,%zu)",
len, data_offs1, data_offs2, hash_offs1, hash_offs2);
}
}
/* Test that HASH_FINAL zeroizes the context. */
static void test_hash_ctx_zeroization(struct kunit *test)
{
static const u8 zeroes[sizeof(struct HASH_CTX)];
struct HASH_CTX ctx;
rand_bytes(test_buf, 128);
HASH_INIT(&ctx);
HASH_UPDATE(&ctx, test_buf, 128);
HASH_FINAL(&ctx, test_buf);
KUNIT_ASSERT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
"Hash context was not zeroized by finalization");
}
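/* Interval at which the hardirq test timer fires: every 5 microseconds. */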
#define IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
struct hash_irq_test_state {
bool (*func)(void *test_specific_state);
void *test_specific_state;
bool task_func_reported_failure;
bool hardirq_func_reported_failure;
bool softirq_func_reported_failure;
unsigned long hardirq_func_calls;
unsigned long softirq_func_calls;
struct hrtimer timer;
struct work_struct bh_work;
};
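/*
 * Timer callback: runs @func once in hardirq context, queues the BH work so
 * that @func also runs in softirq context, then re-arms the timer.
 */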
static enum hrtimer_restart hash_irq_test_timer_func(struct hrtimer *timer)
{
struct hash_irq_test_state *state =
container_of(timer, typeof(*state), timer);
WARN_ON_ONCE(!in_hardirq());
state->hardirq_func_calls++;
if (!state->func(state->test_specific_state))
state->hardirq_func_reported_failure = true;
hrtimer_forward_now(&state->timer, IRQ_TEST_HRTIMER_INTERVAL);
queue_work(system_bh_wq, &state->bh_work);
return HRTIMER_RESTART;
}
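/* BH work function: runs @func once in softirq context. */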
static void hash_irq_test_bh_work_func(struct work_struct *work)
{
struct hash_irq_test_state *state =
container_of(work, typeof(*state), bh_work);
WARN_ON_ONCE(!in_serving_softirq());
state->softirq_func_calls++;
if (!state->func(state->test_specific_state))
state->softirq_func_reported_failure = true;
}
/*
* Helper function which repeatedly runs the given @func in task, softirq, and
* hardirq context concurrently, and reports a failure to KUnit if any
* invocation of @func in any context returns false. @func is passed
* @test_specific_state as its argument. At most 3 invocations of @func will
* run concurrently: one in each of task, softirq, and hardirq context.
*
* The main purpose of this interrupt context testing is to validate fallback
* code paths that run in contexts where the normal code path cannot be used,
 * typically due to the FPU or vector registers already being in use in kernel
 * mode. These code paths aren't covered when the test code is executed only by
 * the KUnit test runner thread in task context. Concurrency is needed because
 * merely using hardirq context is not sufficient to reach a fallback code path
 * on some architectures; the hardirq actually has to occur while the FPU or
 * vector unit is already in use in kernel mode.
*
* Another purpose of this testing is to detect issues with the architecture's
* irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
* especially in softirq context when the softirq may have interrupted a task
* already using kernel-mode FPU or vector (if the arch didn't prevent that).
* Crypto functions are often executed in softirqs, so this is important.
*/
static void run_irq_test(struct kunit *test, bool (*func)(void *),
int max_iterations, void *test_specific_state)
{
struct hash_irq_test_state state = {
.func = func,
.test_specific_state = test_specific_state,
};
unsigned long end_jiffies;
/*
* Set up a hrtimer (the way we access hardirq context) and a work
* struct for the BH workqueue (the way we access softirq context).
*/
hrtimer_setup_on_stack(&state.timer, hash_irq_test_timer_func,
CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
INIT_WORK_ONSTACK(&state.bh_work, hash_irq_test_bh_work_func);
/* Run for up to max_iterations or 1 second, whichever comes first. */
end_jiffies = jiffies + HZ;
hrtimer_start(&state.timer, IRQ_TEST_HRTIMER_INTERVAL,
HRTIMER_MODE_REL_HARD);
for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
i++) {
if (!func(test_specific_state))
state.task_func_reported_failure = true;
}
/* Cancel the timer and work. */
hrtimer_cancel(&state.timer);
flush_work(&state.bh_work);
/* Sanity check: the timer and BH functions should have been run. */
KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
"Timer function was not called");
KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
"BH work function was not called");
/* Check for incorrect hash values reported from any context. */
KUNIT_EXPECT_FALSE_MSG(
test, state.task_func_reported_failure,
"Incorrect hash values reported from task context");
KUNIT_EXPECT_FALSE_MSG(
test, state.hardirq_func_reported_failure,
"Incorrect hash values reported from hardirq context");
KUNIT_EXPECT_FALSE_MSG(
test, state.softirq_func_reported_failure,
"Incorrect hash values reported from softirq context");
}
#define IRQ_TEST_DATA_LEN 256
#define IRQ_TEST_NUM_BUFFERS 3 /* matches max concurrency level */
struct hash_irq_test1_state {
u8 expected_hashes[IRQ_TEST_NUM_BUFFERS][HASH_SIZE];
atomic_t seqno;
};
/*
* Compute the hash of one of the test messages and verify that it matches the
* expected hash from @state->expected_hashes. To increase the chance of
* detecting problems, cycle through multiple messages.
*/
static bool hash_irq_test1_func(void *state_)
{
struct hash_irq_test1_state *state = state_;
u32 i = (u32)atomic_inc_return(&state->seqno) % IRQ_TEST_NUM_BUFFERS;
u8 actual_hash[HASH_SIZE];
HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN, actual_hash);
return memcmp(actual_hash, state->expected_hashes[i], HASH_SIZE) == 0;
}
/*
* Test that if hashes are computed in task, softirq, and hardirq context
* concurrently, then all results are as expected.
*/
static void test_hash_interrupt_context_1(struct kunit *test)
{
struct hash_irq_test1_state state = {};
/* Prepare some test messages and compute the expected hash of each. */
rand_bytes(test_buf, IRQ_TEST_NUM_BUFFERS * IRQ_TEST_DATA_LEN);
for (int i = 0; i < IRQ_TEST_NUM_BUFFERS; i++)
HASH(&test_buf[i * IRQ_TEST_DATA_LEN], IRQ_TEST_DATA_LEN,
state.expected_hashes[i]);
run_irq_test(test, hash_irq_test1_func, 100000, &state);
}
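/*
 * State for one in-progress incremental hash computation, along with a flag
 * that reserves it and its current data offset and step number.
 */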
struct hash_irq_test2_hash_ctx {
struct HASH_CTX hash_ctx;
atomic_t in_use;
int offset;
int step;
};
struct hash_irq_test2_state {
struct hash_irq_test2_hash_ctx ctxs[IRQ_TEST_NUM_BUFFERS];
u8 expected_hash[HASH_SIZE];
u16 update_lens[32];
int num_steps;
};
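/*
 * Claim a free hash context and advance its incremental computation by one
 * step: the init, one of the updates, or the final (which also verifies the
 * resulting hash value).
 */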
static bool hash_irq_test2_func(void *state_)
{
struct hash_irq_test2_state *state = state_;
struct hash_irq_test2_hash_ctx *ctx;
bool ret = true;
for (ctx = &state->ctxs[0]; ctx < &state->ctxs[ARRAY_SIZE(state->ctxs)];
ctx++) {
if (atomic_cmpxchg(&ctx->in_use, 0, 1) == 0)
break;
}
if (WARN_ON_ONCE(ctx == &state->ctxs[ARRAY_SIZE(state->ctxs)])) {
/*
* This should never happen, as the number of contexts is equal
* to the maximum concurrency level of run_irq_test().
*/
return false;
}
if (ctx->step == 0) {
/* Init step */
HASH_INIT(&ctx->hash_ctx);
ctx->offset = 0;
ctx->step++;
} else if (ctx->step < state->num_steps - 1) {
/* Update step */
HASH_UPDATE(&ctx->hash_ctx, &test_buf[ctx->offset],
state->update_lens[ctx->step - 1]);
ctx->offset += state->update_lens[ctx->step - 1];
ctx->step++;
} else {
/* Final step */
u8 actual_hash[HASH_SIZE];
if (WARN_ON_ONCE(ctx->offset != TEST_BUF_LEN))
ret = false;
HASH_FINAL(&ctx->hash_ctx, actual_hash);
if (memcmp(actual_hash, state->expected_hash, HASH_SIZE) != 0)
ret = false;
ctx->step = 0;
}
atomic_set_release(&ctx->in_use, 0);
return ret;
}
/*
* Test that if hashes are computed in task, softirq, and hardirq context
* concurrently, *including doing different parts of the same incremental
* computation in different contexts*, then all results are as expected.
* Besides detecting bugs similar to those that test_hash_interrupt_context_1
* can detect, this test case can also detect bugs where hash function
* implementations don't correctly handle these mixed incremental computations.
*/
static void test_hash_interrupt_context_2(struct kunit *test)
{
struct hash_irq_test2_state *state;
int remaining = TEST_BUF_LEN;
state = kunit_kzalloc(test, sizeof(*state), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, state);
rand_bytes(test_buf, TEST_BUF_LEN);
HASH(test_buf, TEST_BUF_LEN, state->expected_hash);
/*
	 * Generate the list of update lengths to use. Ensure that it contains
	 * multiple entries but does not exceed the capacity of update_lens[].
*/
static_assert(TEST_BUF_LEN / 4096 > 1);
for (state->num_steps = 0;
state->num_steps < ARRAY_SIZE(state->update_lens) - 1 && remaining;
state->num_steps++) {
state->update_lens[state->num_steps] =
rand_length(min(remaining, 4096));
remaining -= state->update_lens[state->num_steps];
}
if (remaining)
state->update_lens[state->num_steps++] = remaining;
state->num_steps += 2; /* for init and final */
run_irq_test(test, hash_irq_test2_func, 250000, state);
}
#define UNKEYED_HASH_KUNIT_CASES \
KUNIT_CASE(test_hash_test_vectors), \
KUNIT_CASE(test_hash_all_lens_up_to_4096), \
KUNIT_CASE(test_hash_incremental_updates), \
KUNIT_CASE(test_hash_buffer_overruns), \
KUNIT_CASE(test_hash_overlaps), \
KUNIT_CASE(test_hash_alignment_consistency), \
KUNIT_CASE(test_hash_ctx_zeroization), \
KUNIT_CASE(test_hash_interrupt_context_1), \
KUNIT_CASE(test_hash_interrupt_context_2)
/* benchmark_hash is omitted so that the suites can put it last. */
#ifdef HMAC
/*
* Test the corresponding HMAC variant.
*
* This test case is fairly short, since HMAC is just a simple C wrapper around
* the underlying unkeyed hash function, which is already well-tested by the
* other test cases. It's not useful to test things like data alignment or
* interrupt context again for HMAC, nor to have a long list of test vectors.
*
* Thus, just do a single consolidated test, which covers all data lengths up to
* 4096 bytes and all key lengths up to 292 bytes. For each data length, select
* a key length, generate the inputs from a seed, and compute the HMAC value.
* Concatenate all these HMAC values together, and compute the HMAC of that.
* Verify that value. If this fails, then the HMAC implementation is wrong.
* This won't show which specific input failed, but that should be fine. Any
* failure would likely be non-input-specific or also show in the unkeyed tests.
*/
static void test_hmac(struct kunit *test)
{
static const u8 zeroes[sizeof(struct HMAC_CTX)];
u8 *raw_key;
struct HMAC_KEY key;
struct HMAC_CTX ctx;
u8 mac[HASH_SIZE];
u8 mac2[HASH_SIZE];
static_assert(TEST_BUF_LEN >= 4096 + 293);
rand_bytes_seeded_from_len(test_buf, 4096);
raw_key = &test_buf[4096];
rand_bytes_seeded_from_len(raw_key, 32);
HMAC_PREPAREKEY(&key, raw_key, 32);
HMAC_INIT(&ctx, &key);
for (size_t data_len = 0; data_len <= 4096; data_len++) {
/*
		 * Cycle through key lengths as well, going up to 292 bytes:
		 * a bit larger than the largest hash block size (the size at
		 * which keys start being hashed down to one block), so going
		 * higher would not be useful. To reduce correlation with
		 * data_len, use a prime modulus (293) here.
*/
size_t key_len = data_len % 293;
HMAC_UPDATE(&ctx, test_buf, data_len);
rand_bytes_seeded_from_len(raw_key, key_len);
HMAC_USINGRAWKEY(raw_key, key_len, test_buf, data_len, mac);
HMAC_UPDATE(&ctx, mac, HASH_SIZE);
/* Verify that HMAC() is consistent with HMAC_USINGRAWKEY(). */
HMAC_PREPAREKEY(&key, raw_key, key_len);
HMAC(&key, test_buf, data_len, mac2);
KUNIT_ASSERT_MEMEQ_MSG(
test, mac, mac2, HASH_SIZE,
"HMAC gave different results with raw and prepared keys");
}
HMAC_FINAL(&ctx, mac);
KUNIT_EXPECT_MEMEQ_MSG(test, mac, hmac_testvec_consolidated, HASH_SIZE,
"HMAC gave wrong result");
KUNIT_EXPECT_MEMEQ_MSG(test, &ctx, zeroes, sizeof(ctx),
"HMAC context was not zeroized by finalization");
}
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES, KUNIT_CASE(test_hmac)
#else
#define HASH_KUNIT_CASES UNKEYED_HASH_KUNIT_CASES
#endif
/* Benchmark the hash function on various data lengths. */
static void benchmark_hash(struct kunit *test)
{
static const size_t lens_to_test[] = {
1, 16, 64, 127, 128, 200, 256,
511, 512, 1024, 3173, 4096, 16384,
};
u8 hash[HASH_SIZE];
if (!IS_ENABLED(CONFIG_CRYPTO_LIB_BENCHMARK))
kunit_skip(test, "not enabled");
/* Warm-up */
for (size_t i = 0; i < 10000000; i += TEST_BUF_LEN)
HASH(test_buf, TEST_BUF_LEN, hash);
for (size_t i = 0; i < ARRAY_SIZE(lens_to_test); i++) {
size_t len = lens_to_test[i];
/* The '+ 128' tries to account for per-message overhead. */
size_t num_iters = 10000000 / (len + 128);
u64 t;
KUNIT_ASSERT_LE(test, len, TEST_BUF_LEN);
preempt_disable();
t = ktime_get_ns();
for (size_t j = 0; j < num_iters; j++)
HASH(test_buf, len, hash);
t = ktime_get_ns() - t;
preempt_enable();
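		/* (len * num_iters) bytes in t ns; bytes/ns * 1000 = bytes/us = MB/s */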
kunit_info(test, "len=%zu: %llu MB/s", len,
div64_u64((u64)len * num_iters * 1000, t ?: 1));
}
}