// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"
/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;
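/*
 * The Geode LX has a single AES engine, so one global MMIO base and one
 * spinlock suffice: every operation below serializes on `lock` before
 * touching the hardware registers.
 */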
/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}
/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}
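/*
 * do_crypt() programs a single operation into the engine and busy-waits
 * for completion. Source and destination are translated with
 * virt_to_phys() and handed straight to the engine's DMA registers, so
 * callers must pass lowmem virtual addresses. Returns 0 on completion,
 * 1 if the poll budget of AES_OP_TIMEOUT iterations expires first.
 */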
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}
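/*
 * geode_aes_crypt() loads the key and IV (when the key is visible and
 * the mode is CBC), then runs the whole buffer through the engine under
 * the device lock. It returns the number of bytes processed, which is
 * always op->len. A minimal caller sketch (geode_encrypt() below does
 * exactly this for one block):
 *
 *	op->src = in;			op->dst = out;
 *	op->mode = AES_MODE_ECB;	op->flags = 0;
 *	op->len = AES_BLOCK_SIZE;	op->dir = AES_DIR_ENCRYPT;
 *	geode_aes_crypt(op);
 */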
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	if (op->len == 0)
		return 0;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags; otherwise
	 * we don't need to worry.
	 */
	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */
	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	ret = do_crypt(op->src, op->dst, op->len, flags);
	BUG_ON(ret);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}
/* CRYPTO-API Functions */
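/*
 * The hardware only implements AES-128. Each tfm therefore keeps a
 * software fallback (allocated in the cra_init hooks below), and the
 * setkey handlers forward any key that is not AES_KEYSIZE_128 to it.
 */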
static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(op->fallback.cip, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
		unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);
	unsigned int ret;

	op->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(op->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
		/* not supported at all */
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * The requested key size is not supported by HW, do a fallback
	 */
	op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
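/*
 * The blkcipher fallback helpers temporarily swap desc->tfm to the
 * software implementation for the duration of the call and restore the
 * original tfm before returning, so the caller never observes the
 * switch.
 */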
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}
static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm;
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

	tfm = desc->tfm;
	desc->tfm = op->fallback.blk;

	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

	desc->tfm = tfm;
	return ret;
}
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}
static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (unlikely(op->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(op->fallback.cip, out, in);
		return;
	}

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}
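/*
 * cra_init/cra_exit manage the software fallback's lifetime: it is
 * allocated once per tfm at creation time, so the setkey and crypt
 * paths never have to allocate, and it is freed when the tfm goes away.
 */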
static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.cip);
	}

	return 0;
}
static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_cipher(op->fallback.cip);
	op->fallback.cip = NULL;
}
static struct crypto_alg geode_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "geode-aes",
	.cra_priority		= 300,
	.cra_alignmask		= 15,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= geode_setkey_cip,
			.cia_encrypt		= geode_encrypt,
			.cia_decrypt		= geode_decrypt
		}
	}
};
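/*
 * The blkcipher handlers below share one pattern: walk the scatterlists
 * with the blkcipher_walk API, feed each contiguous chunk to the engine
 * rounded down to a whole number of AES blocks, and let
 * blkcipher_walk_done() carry any partial-block tail into the next
 * iteration.
 */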
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);

		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);
	op->iv = walk.iv;

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	op->fallback.blk = crypto_alloc_blkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(op->fallback.blk)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(op->fallback.blk);
	}

	return 0;
}
static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(op->fallback.blk);
	op->fallback.blk = NULL;
}
static struct crypto_alg geode_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-geode",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= geode_setkey_blk,
			.encrypt	= geode_cbc_encrypt,
			.decrypt	= geode_cbc_decrypt,
			.ivsize		= AES_BLOCK_SIZE,
		}
	}
};
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	if (unlikely(op->keylen != AES_KEYSIZE_128))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
static struct crypto_alg geode_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-geode",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= geode_setkey_blk,
			.encrypt	= geode_ecb_encrypt,
			.decrypt	= geode_ecb_decrypt,
		}
	}
};
static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}
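/*
 * Probe acquires resources in the reverse order of the unwind labels at
 * the bottom: enable the device, request its regions, map BAR 0, then
 * register the three algorithms. A failure at any step unwinds exactly
 * the steps that already succeeded.
 */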
static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_alg(&geode_ecb_alg);
	if (ret)
		goto ealg;

	ret = crypto_register_alg(&geode_cbc_alg);
	if (ret)
		goto eecb;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}
static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);
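/*
 * Exporting the ID table lets the module autoload via the modalias
 * mechanism when the LX AES PCI function is discovered.
 */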
static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");