// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever you make any changes to the following
 * structure, *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter, but the HW reads
 * 16 * 16 bytes). */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}
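
/*
 * Note: the crypto core only guarantees crypto_tfm_ctx_alignment() for the
 * context memory, which may be smaller than PADLOCK_ALIGNMENT.  The helpers
 * above therefore round the context pointer up so E, d_data and cword really
 * sit on 16-byte boundaries, as the xcrypt instructions require.
 */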

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (aes_expandkey(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}
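
/*
 * Worked example (informative only): a 16-byte key leaves keygen at 0 and
 * yields rounds = 10, ksize = 0, so the engine expands the key itself from
 * E[0..3]; a 32-byte key takes the aes_expandkey() path above and ends up
 * with keygen = 1, rounds = 14, ksize = 2.
 */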

static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
				unsigned int key_len)
{
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}
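
/*
 * Rationale (per VIA's PadLock documentation): the engine caches the key and
 * control word it used last, and a write to EFLAGS (pushf/popf) forces it to
 * reload them on the next xcrypt.  We only pay that cost when the control
 * word differs from the one this CPU used most recently.
 */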

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
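
/*
 * Register convention used by the xcrypt instructions (visible in the asm
 * constraints): ESI = source, EDI = destination, EBX = key schedule,
 * EDX = control word, ECX = block count.  The opcode is emitted as raw bytes
 * because older assemblers do not know the xcrypt mnemonics.
 */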

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
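
/*
 * Sizing note (inferred from the callers below): the copy helpers are only
 * reached for fewer than ecb_fetch_blocks/cbc_fetch_blocks blocks, so
 * (MAX_*_FETCH_BLOCKS - 1) blocks plus alignment slack is enough room, and
 * any prefetch past the end of 'buf' lands on already-mapped stack.
 */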

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
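
/*
 * The split above is the prefetch-errata workaround: the odd leading
 * 'initial' blocks are processed first, while at least ecb_fetch_blocks of
 * data still follow them, and the remaining count is then a multiple of
 * ecb_fetch_blocks, so the engine's over-fetch never runs past the end of
 * the source buffer.
 */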

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= padlock_aes_encrypt,
			.cia_decrypt		= padlock_aes_decrypt,
		}
	}
};
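
/*
 * Selection note: PADLOCK_CRA_PRIORITY is higher than the generic C
 * implementation's priority, so once this module is loaded a plain
 * crypto_alloc_cipher("aes", 0, 0) resolves to "aes-padlock" automatically.
 */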

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};
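
/*
 * Usage sketch (not part of this driver): kernel users go through the
 * generic skcipher API, e.g.
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * and end up with "cbc-aes-padlock" because PADLOCK_COMPOSITE_PRIORITY
 * outranks the software cbc(aes) template.
 */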

static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
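
/*
 * The device table above lets userspace autoload this module: the x86cpu
 * modalias is matched against the XCRYPT feature bit reported by the CPU,
 * so no manual configuration is needed on PadLock-capable hardware.
 */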

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");
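
/*
 * MODULE_ALIAS_CRYPTO("aes") additionally registers a "crypto-aes" alias so
 * that the crypto core's request_module() can pull this driver in when "aes"
 * is requested before the module has been loaded.
 */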