/*
 * Quick & dirty crypto testing module.
 *
 * This will only exist until we have a better testing mechanism
 * (e.g. a char device).
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) 2007 Nokia Siemens Networks
 *
 * Updated RFC4106 AES-GCM testing.
 *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *             Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include "tcrypt.h"

/*
 * Need slab memory for testing (size in number of pages).
 */
#define TVMEMSIZE	4

/*
 * Used by test_cipher_speed()
 */
#define ENCRYPT 1
#define DECRYPT 0

#define MAX_DIGEST_SIZE		64

/*
 * return a string with the driver name
 */
#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))

/*
 * Used by test_cipher_speed()
 */
static unsigned int sec;

static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
static char *tvmem[TVMEMSIZE];

static char *check[] = {
	"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
	"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
	"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
	"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta",  "fcrypt",
	"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
	"lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
	NULL
};

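/*
 * Result tracking for asynchronous requests: the completion callback below
 * records the final error code and wakes up the waiting test routine.
 */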
struct tcrypt_result {
	struct completion completion;
	int err;
};

static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static inline int do_one_aead_op(struct aead_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct tcrypt_result *tr = req->base.data;

		ret = wait_for_completion_interruptible(&tr->completion);
		if (!ret)
			ret = tr->err;
		reinit_completion(&tr->completion);
	}

	return ret;
}

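/* Run AEAD operations back to back for 'secs' seconds and report throughput. */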
static int test_aead_jiffies(struct aead_request *req, int enc,
				int blen, int secs)
{
	unsigned long start, end;
	int bcount;
	int ret;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));

		if (ret)
			return ret;
	}

	printk("%d operations in %d seconds (%ld bytes)\n",
	       bcount, secs, (long)bcount * blen);
	return 0;
}

static int test_aead_cycles(struct aead_request *req, int enc, int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
		else
			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	if (ret == 0)
		printk("1 operation in %lu cycles (%d bytes)\n",
		       (cycles + 4) / 8, blen);

	return ret;
}

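/* Zero-terminated lists of buffer sizes exercised by the speed tests. */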
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };

#define XBUFSIZE 8
#define MAX_IVLEN 32

static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
	int i;

	for (i = 0; i < XBUFSIZE; i++) {
		buf[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[i])
			goto err_free_buf;
	}

	return 0;

err_free_buf:
	while (i-- > 0)
		free_page((unsigned long)buf[i]);

	return -ENOMEM;
}

static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	int i;

	for (i = 0; i < XBUFSIZE; i++)
		free_page((unsigned long)buf[i]);
}

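/*
 * Map 'buflen' bytes of xbuf[] pages into sg[1..]; entry 0 is left for the
 * caller to point at the associated data.
 */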
static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
			unsigned int buflen)
{
	int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
	int k, rem;

	if (np > XBUFSIZE) {
		rem = PAGE_SIZE;
		np = XBUFSIZE;
	} else {
		rem = buflen % PAGE_SIZE;
	}

	sg_init_table(sg, np + 1);
	np--;
	for (k = 0; k < np; k++)
		sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);

	sg_set_buf(&sg[k + 1], xbuf[k], rem);
}

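/*
 * Measure AEAD performance for every combination of key size and buffer
 * size, either in operations per second or in cycles per operation.
 */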
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
			    struct aead_speed_template *template,
			    unsigned int tcount, u8 authsize,
			    unsigned int aad_size, u8 *keysize)
{
	unsigned int i, j;
	struct crypto_aead *tfm;
	int ret = -ENOMEM;
	const char *key;
	struct aead_request *req;
	struct scatterlist *sg;
	struct scatterlist *sgout;
	const char *e;
	void *assoc;
	char *iv;
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];
	unsigned int *b_size;
	unsigned int iv_len;
	struct tcrypt_result result;

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return;

	if (aad_size >= PAGE_SIZE) {
		pr_err("associated data length (%u) too big\n", aad_size);
		goto out_noxbuf;
	}

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	if (testmgr_alloc_buf(xbuf))
		goto out_noxbuf;
	if (testmgr_alloc_buf(axbuf))
		goto out_noaxbuf;
	if (testmgr_alloc_buf(xoutbuf))
		goto out_nooutbuf;

	sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL);
	if (!sg)
		goto out_nosg;
	sgout = &sg[9];

	tfm = crypto_alloc_aead(algo, 0, 0);

	if (IS_ERR(tfm)) {
		pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		goto out_notfm;
	}

	init_completion(&result.completion);
	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
			get_driver_name(crypto_aead, tfm), e);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("alg: aead: Failed to allocate request for %s\n",
		       algo);
		goto out_noreq;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tcrypt_complete, &result);

	i = 0;
	do {
		b_size = aead_sizes;
		do {
			assoc = axbuf[0];
			memset(assoc, 0xff, aad_size);

			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for tvmem (%lu)\n",
				       *keysize + *b_size,
					TVMEMSIZE * PAGE_SIZE);
				goto out;
			}

			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}
			ret = crypto_aead_setkey(tfm, key, *keysize);
			if (!ret)
				ret = crypto_aead_setauthsize(tfm, authsize);

			iv_len = crypto_aead_ivsize(tfm);
			if (iv_len)
				memset(iv, 0xff, iv_len);

			crypto_aead_clear_flags(tfm, ~0);
			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
					i, *keysize * 8, *b_size);


			memset(tvmem[0], 0xff, PAGE_SIZE);

			if (ret) {
				pr_err("setkey() failed flags=%x\n",
						crypto_aead_get_flags(tfm));
				goto out;
			}

			sg_init_aead(sg, xbuf,
				    *b_size + (enc ? authsize : 0));

			sg_init_aead(sgout, xoutbuf,
				    *b_size + (enc ? authsize : 0));

			sg_set_buf(&sg[0], assoc, aad_size);
			sg_set_buf(&sgout[0], assoc, aad_size);

			aead_request_set_crypt(req, sg, sgout, *b_size, iv);
			aead_request_set_ad(req, aad_size);

			if (secs)
				ret = test_aead_jiffies(req, enc, *b_size,
							secs);
			else
				ret = test_aead_cycles(req, enc, *b_size);

			if (ret) {
				pr_err("%s() failed return code=%d\n", e, ret);
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out:
	aead_request_free(req);
out_noreq:
	crypto_free_aead(tfm);
out_notfm:
	kfree(sg);
out_nosg:
	testmgr_free_buf(xoutbuf);
out_nooutbuf:
	testmgr_free_buf(axbuf);
out_noaxbuf:
	testmgr_free_buf(xbuf);
out_noxbuf:
	kfree(iv);
}

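/* Initialize a TVMEMSIZE-entry scatterlist over tvmem[] filled with 0xff. */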
static void test_hash_sg_init(struct scatterlist *sg)
{
	int i;

	sg_init_table(sg, TVMEMSIZE);
	for (i = 0; i < TVMEMSIZE; i++) {
		sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
		memset(tvmem[i], 0xff, PAGE_SIZE);
	}
}

static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct tcrypt_result *tr = req->base.data;

		wait_for_completion(&tr->completion);
		reinit_completion(&tr->completion);
		ret = tr->err;
	}
	return ret;
}

struct test_mb_ahash_data {
	struct scatterlist sg[TVMEMSIZE];
	char result[64];
	struct ahash_request *req;
	struct tcrypt_result tresult;
	char *xbuf[XBUFSIZE];
};

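/*
 * Multibuffer hash speed test: eight digest requests are issued without
 * waiting so that multibuffer implementations can process them in parallel.
 */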
static void test_mb_ahash_speed(const char *algo, unsigned int sec,
				struct hash_speed *speed)
{
	struct test_mb_ahash_data *data;
	struct crypto_ahash *tfm;
	unsigned long start, end;
	unsigned long cycles;
	unsigned int i, j, k;
	int ret;

	data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
	if (!data)
		return;

	tfm = crypto_alloc_ahash(algo, 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
			algo, PTR_ERR(tfm));
		goto free_data;
	}

	for (i = 0; i < 8; ++i) {
		if (testmgr_alloc_buf(data[i].xbuf))
			goto out;

		init_completion(&data[i].tresult.completion);

		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!data[i].req) {
			pr_err("alg: hash: Failed to allocate request for %s\n",
			       algo);
			goto out;
		}

		ahash_request_set_callback(data[i].req, 0,
					   tcrypt_complete, &data[i].tresult);
		test_hash_sg_init(data[i].sg);
	}

	pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
		get_driver_name(crypto_ahash, tfm));

	for (i = 0; speed[i].blen != 0; i++) {
		/* For some reason this only tests digests. */
		if (speed[i].blen != speed[i].plen)
			continue;

		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			goto out;
		}

		if (speed[i].klen)
			crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);

		for (k = 0; k < 8; k++)
			ahash_request_set_crypt(data[k].req, data[k].sg,
						data[k].result, speed[i].blen);

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen,
			speed[i].blen / speed[i].plen);

		start = get_cycles();

		for (k = 0; k < 8; k++) {
			ret = crypto_ahash_digest(data[k].req);
			if (ret == -EINPROGRESS) {
				ret = 0;
				continue;
			}

			if (ret)
				break;

			complete(&data[k].tresult.completion);
			data[k].tresult.err = 0;
		}

		for (j = 0; j < k; j++) {
			struct tcrypt_result *tr = &data[j].tresult;

			wait_for_completion(&tr->completion);
			if (tr->err)
				ret = tr->err;
		}

		end = get_cycles();
		cycles = end - start;
		pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
			cycles, cycles / (8 * speed[i].blen));

		if (ret) {
			pr_err("At least one hashing failed ret=%d\n", ret);
			break;
		}
	}

out:
	for (k = 0; k < 8; ++k)
		ahash_request_free(data[k].req);

	for (k = 0; k < 8; ++k)
		testmgr_free_buf(data[k].xbuf);

	crypto_free_ahash(tfm);

free_data:
	kfree(data);
}

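/* Time one-shot digest operations for 'secs' seconds. */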
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
				     char *out, int secs)
{
	unsigned long start, end;
	int bcount;
	int ret;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			return ret;
	}

	printk("%6u opers/sec, %9lu bytes/sec\n",
	       bcount / secs, ((long)bcount * blen) / secs);

	return 0;
}

static int test_ahash_jiffies(struct ahash_request *req, int blen,
			      int plen, char *out, int secs)
{
	unsigned long start, end;
	int bcount, pcount;
	int ret;

	if (plen == blen)
		return test_ahash_jiffies_digest(req, blen, out, secs);

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			return ret;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				return ret;
		}
		/* we assume there is enough space in 'out' for the result */
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			return ret;
	}

	pr_cont("%6u opers/sec, %9lu bytes/sec\n",
		bcount / secs, ((long)bcount * blen) / secs);

	return 0;
}

static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
				    char *out)
{
	unsigned long cycles = 0;
	int ret, i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();

		ret = do_one_ahash_op(req, crypto_ahash_digest(req));
		if (ret)
			goto out;

		end = get_cycles();

		cycles += end - start;
	}

out:
	if (ret)
		return ret;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		cycles / 8, cycles / (8 * blen));

	return 0;
}

static int test_ahash_cycles(struct ahash_request *req, int blen,
			     int plen, char *out)
{
	unsigned long cycles = 0;
	int i, pcount, ret;

	if (plen == blen)
		return test_ahash_cycles_digest(req, blen, out);

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			goto out;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				goto out;
		}
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();

		ret = do_one_ahash_op(req, crypto_ahash_init(req));
		if (ret)
			goto out;
		for (pcount = 0; pcount < blen; pcount += plen) {
			ret = do_one_ahash_op(req, crypto_ahash_update(req));
			if (ret)
				goto out;
		}
		ret = do_one_ahash_op(req, crypto_ahash_final(req));
		if (ret)
			goto out;

		end = get_cycles();

		cycles += end - start;
	}

out:
	if (ret)
		return ret;

	pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
		cycles / 8, cycles / (8 * blen));

	return 0;
}

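/*
 * Common helper for the hash speed tests; 'mask' controls whether an
 * asynchronous implementation may be selected.
 */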
static void test_ahash_speed_common(const char *algo, unsigned int secs,
				    struct hash_speed *speed, unsigned mask)
{
	struct scatterlist sg[TVMEMSIZE];
	struct tcrypt_result tresult;
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	char *output;
	int i, ret;

	tfm = crypto_alloc_ahash(algo, 0, mask);
	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n",
		       algo, PTR_ERR(tfm));
		return;
	}

	printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
			get_driver_name(crypto_ahash, tfm));

	if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
		pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
		       MAX_DIGEST_SIZE);
		goto out;
	}

	test_hash_sg_init(sg);
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("ahash request allocation failure\n");
		goto out;
	}

	init_completion(&tresult.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);

	output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
	if (!output)
		goto out_nomem;

	for (i = 0; speed[i].blen != 0; i++) {
		if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
			pr_err("template (%u) too big for tvmem (%lu)\n",
			       speed[i].blen, TVMEMSIZE * PAGE_SIZE);
			break;
		}

		pr_info("test%3u "
			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
			i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);

		ahash_request_set_crypt(req, sg, output, speed[i].plen);

		if (secs)
			ret = test_ahash_jiffies(req, speed[i].blen,
						 speed[i].plen, output, secs);
		else
			ret = test_ahash_cycles(req, speed[i].blen,
						speed[i].plen, output);

		if (ret) {
			pr_err("hashing failed ret=%d\n", ret);
			break;
		}
	}

	kfree(output);

out_nomem:
	ahash_request_free(req);

out:
	crypto_free_ahash(tfm);
}

static void test_ahash_speed(const char *algo, unsigned int secs,
			     struct hash_speed *speed)
{
	return test_ahash_speed_common(algo, secs, speed, 0);
}

static void test_hash_speed(const char *algo, unsigned int secs,
			    struct hash_speed *speed)
{
	return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}

static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		struct tcrypt_result *tr = req->base.data;

		wait_for_completion(&tr->completion);
		reinit_completion(&tr->completion);
		ret = tr->err;
	}

	return ret;
}

static int test_acipher_jiffies(struct skcipher_request *req, int enc,
				int blen, int secs)
{
	unsigned long start, end;
	int bcount;
	int ret;

	for (start = jiffies, end = start + secs * HZ, bcount = 0;
	     time_before(jiffies, end); bcount++) {
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));

		if (ret)
			return ret;
	}

	pr_cont("%d operations in %d seconds (%ld bytes)\n",
		bcount, secs, (long)bcount * blen);
	return 0;
}

static int test_acipher_cycles(struct skcipher_request *req, int enc,
			       int blen)
{
	unsigned long cycles = 0;
	int ret = 0;
	int i;

	/* Warm-up run. */
	for (i = 0; i < 4; i++) {
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));

		if (ret)
			goto out;
	}

	/* The real thing. */
	for (i = 0; i < 8; i++) {
		cycles_t start, end;

		start = get_cycles();
		if (enc)
			ret = do_one_acipher_op(req,
						crypto_skcipher_encrypt(req));
		else
			ret = do_one_acipher_op(req,
						crypto_skcipher_decrypt(req));
		end = get_cycles();

		if (ret)
			goto out;

		cycles += end - start;
	}

out:
	if (ret == 0)
		pr_cont("1 operation in %lu cycles (%d bytes)\n",
			(cycles + 4) / 8, blen);

	return ret;
}

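/*
 * Skcipher speed test; 'async' selects whether an asynchronous
 * implementation may be used (otherwise CRYPTO_ALG_ASYNC is masked out).
 */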
static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
				struct cipher_speed_template *template,
				unsigned int tcount, u8 *keysize, bool async)
{
	unsigned int ret, i, j, k, iv_len;
	struct tcrypt_result tresult;
	const char *key;
	char iv[128];
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	const char *e;
	u32 *b_size;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	init_completion(&tresult.completion);

	tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm)) {
		pr_err("failed to load transform for %s: %ld\n", algo,
		       PTR_ERR(tfm));
		return;
	}

	pr_info("\ntesting speed of async %s (%s) %s\n", algo,
			get_driver_name(crypto_skcipher, tfm), e);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
		       algo);
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      tcrypt_complete, &tresult);

	i = 0;
	do {
		b_size = block_sizes;

		do {
			struct scatterlist sg[TVMEMSIZE];

			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
				pr_err("template (%u) too big for "
				       "tvmem (%lu)\n", *keysize + *b_size,
				       TVMEMSIZE * PAGE_SIZE);
				goto out_free_req;
			}

			pr_info("test %u (%d bit key, %d byte blocks): ", i,
				*keysize * 8, *b_size);

			memset(tvmem[0], 0xff, PAGE_SIZE);

			/* set key, plain text and IV */
			key = tvmem[0];
			for (j = 0; j < tcount; j++) {
				if (template[j].klen == *keysize) {
					key = template[j].key;
					break;
				}
			}

			crypto_skcipher_clear_flags(tfm, ~0);

			ret = crypto_skcipher_setkey(tfm, key, *keysize);
			if (ret) {
				pr_err("setkey() failed flags=%x\n",
Herbert Xu's avatar
					crypto_skcipher_get_flags(tfm));
			}

			k = *keysize + *b_size;
			sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));

			if (k > PAGE_SIZE) {
				sg_set_buf(sg, tvmem[0] + *keysize,
				   PAGE_SIZE - *keysize);
				k -= PAGE_SIZE;
				j = 1;
				while (k > PAGE_SIZE) {
					sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
					memset(tvmem[j], 0xff, PAGE_SIZE);
					j++;
					k -= PAGE_SIZE;
				}
				sg_set_buf(sg + j, tvmem[j], k);
				memset(tvmem[j], 0xff, k);
			} else {
				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
			}

			iv_len = crypto_skcipher_ivsize(tfm);
			if (iv_len)
				memset(&iv, 0xff, iv_len);

			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);

			if (secs)
				ret = test_acipher_jiffies(req, enc,
							   *b_size, secs);
			else
				ret = test_acipher_cycles(req, enc,
							  *b_size);

			if (ret) {
				pr_err("%s() failed flags=%x\n", e,
				       crypto_skcipher_get_flags(tfm));
				break;
			}
			b_size++;
			i++;
		} while (*b_size);
		keysize++;
	} while (*keysize);

out_free_req:
	skcipher_request_free(req);
out:
	crypto_free_skcipher(tfm);
}

static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
			       struct cipher_speed_template *template,
			       unsigned int tcount, u8 *keysize)
{
	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
				   true);
}

static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
			      struct cipher_speed_template *template,
			      unsigned int tcount, u8 *keysize)
{
	return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
				   false);
}

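/* Report which of the algorithms in check[] are currently registered. */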
static void test_available(void)
{
	char **name = check;

	while (*name) {
		printk("alg %s ", *name);
		printk(crypto_has_alg(*name, 0, 0) ?
		       "found\n" : "not found\n");
		name++;
	}
}

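/*
 * Run the self-test for one algorithm.  In FIPS mode, non-approved
 * algorithms return -EINVAL from alg_test(), which is not counted as a
 * failure here.
 */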
static inline int tcrypt_test(const char *alg)
{
	int ret;

	pr_debug("testing %s\n", alg);

	ret = alg_test(alg, alg, 0, 0);
	/* non-fips algs return -EINVAL in fips mode */
	if (fips_enabled && ret == -EINVAL)
		ret = 0;
	return ret;
}

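/*
 * Dispatch on the requested test number: mode 0 runs cases 1-199 in turn,
 * any other value selects a specific test case.
 */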
static int do_test(const char *alg, u32 type, u32 mask, int m)
{
	int i;
	int ret = 0;

	switch (m) {
	case 0:
		if (alg) {
			if (!crypto_has_alg(alg, type,
					    mask ?: CRYPTO_ALG_TYPE_MASK))
				ret = -ENOENT;
			break;
		}

		for (i = 1; i < 200; i++)
			ret += do_test(NULL, 0, 0, i);
		break;

	case 1:
		ret += tcrypt_test("md5");
		break;

	case 2:
		ret += tcrypt_test("sha1");
		break;

	case 3:
		ret += tcrypt_test("ecb(des)");
		ret += tcrypt_test("cbc(des)");
		ret += tcrypt_test("ctr(des)");
		break;

	case 4:
		ret += tcrypt_test("ecb(des3_ede)");
		ret += tcrypt_test("cbc(des3_ede)");
		ret += tcrypt_test("ctr(des3_ede)");
		break;

	case 5:
		ret += tcrypt_test("md4");
		break;

	case 6:
		ret += tcrypt_test("sha256");
		break;

	case 7:
		ret += tcrypt_test("ecb(blowfish)");
		ret += tcrypt_test("cbc(blowfish)");
		ret += tcrypt_test("ctr(blowfish)");
		break;

	case 8:
		ret += tcrypt_test("ecb(twofish)");
		ret += tcrypt_test("cbc(twofish)");
		ret += tcrypt_test("ctr(twofish)");
		ret += tcrypt_test("lrw(twofish)");
		ret += tcrypt_test("xts(twofish)");
		break;

	case 9:
		ret += tcrypt_test("ecb(serpent)");
		ret += tcrypt_test("cbc(serpent)");
		ret += tcrypt_test("ctr(serpent)");
		ret += tcrypt_test("lrw(serpent)");
		ret += tcrypt_test("xts(serpent)");
		break;

	case 10:
		ret += tcrypt_test("ecb(aes)");
		ret += tcrypt_test("cbc(aes)");
		ret += tcrypt_test("lrw(aes)");
		ret += tcrypt_test("xts(aes)");
		ret += tcrypt_test("ctr(aes)");
		ret += tcrypt_test("rfc3686(ctr(aes))");
		break;

	case 11:
		ret += tcrypt_test("sha384");
		break;

	case 12:
		ret += tcrypt_test("sha512");
		break;

	case 13:
		ret += tcrypt_test("deflate");
		break;

	case 14:
		ret += tcrypt_test("ecb(cast5)");
		ret += tcrypt_test("cbc(cast5)");
		ret += tcrypt_test("ctr(cast5)");
		break;

	case 15:
		ret += tcrypt_test("ecb(cast6)");
		ret += tcrypt_test("cbc(cast6)");
		ret += tcrypt_test("ctr(cast6)");
		ret += tcrypt_test("lrw(cast6)");
		ret += tcrypt_test("xts(cast6)");
		break;

	case 16:
		ret += tcrypt_test("ecb(arc4)");
		break;

	case 17:
		ret += tcrypt_test("michael_mic");
		break;

	case 18:
		ret += tcrypt_test("crc32c");
		break;

	case 19:
		ret += tcrypt_test("ecb(tea)");
		break;

	case 20:
		ret += tcrypt_test("ecb(xtea)");
		break;

	case 21:
		ret += tcrypt_test("ecb(khazad)");
		break;

	case 22:
		ret += tcrypt_test("wp512");
		break;

	case 23:
		ret += tcrypt_test("wp384");
		break;

	case 24:
		ret += tcrypt_test("wp256");