1 // SPDX-License-Identifier: GPL-2.0
5 * Support for ATMEL DES/TDES HW acceleration.
7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
8 * Author: Nicolas Royer <nicolas@eukrea.com>
10 * Some ideas are from omap-aes.c drivers.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/err.h>
18 #include <linux/clk.h>
20 #include <linux/hw_random.h>
21 #include <linux/platform_device.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/errno.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/scatterlist.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/of_device.h>
31 #include <linux/delay.h>
32 #include <linux/crypto.h>
33 #include <linux/cryptohash.h>
34 #include <crypto/scatterwalk.h>
35 #include <crypto/algapi.h>
36 #include <crypto/internal/des.h>
37 #include <crypto/hash.h>
38 #include <crypto/internal/hash.h>
39 #include <crypto/internal/skcipher.h>
40 #include <linux/platform_data/crypto-atmel.h>
41 #include "atmel-tdes-regs.h"
/*
 * dd->flags / rctx->mode layout.
 * The low byte (TDES_FLAGS_MODE_MASK) carries the per-request cipher mode
 * and direction; bits 16+ track driver state and never enter rctx->mode.
 */
44 #define TDES_FLAGS_MODE_MASK 0x00ff
45 #define TDES_FLAGS_ENCRYPT BIT(0)
46 #define TDES_FLAGS_CBC BIT(1)
47 #define TDES_FLAGS_CFB BIT(2)
48 #define TDES_FLAGS_CFB8 BIT(3)
49 #define TDES_FLAGS_CFB16 BIT(4)
50 #define TDES_FLAGS_CFB32 BIT(5)
51 #define TDES_FLAGS_CFB64 BIT(6)
52 #define TDES_FLAGS_OFB BIT(7)
/* Driver state: hardware reset done, fast (direct sg) DMA path in use,
 * a request is in flight, transfer uses dmaengine rather than the PDC. */
54 #define TDES_FLAGS_INIT BIT(16)
55 #define TDES_FLAGS_FAST BIT(17)
56 #define TDES_FLAGS_BUSY BIT(18)
57 #define TDES_FLAGS_DMA BIT(19)
/* Depth of the software crypto_queue feeding the single hardware engine. */
59 #define ATMEL_TDES_QUEUE_LENGTH 50
/* CFB data-unit sizes in bytes (CFB64 uses DES_BLOCK_SIZE). */
61 #define CFB8_BLOCK_SIZE 1
62 #define CFB16_BLOCK_SIZE 2
63 #define CFB32_BLOCK_SIZE 4
/* Hardware capability bits, filled in from the IP version (see
 * atmel_tdes_get_cap()). */
65 struct atmel_tdes_caps {
70 struct atmel_tdes_dev;
/* Per-tfm context: bound device, cipher key and its length.
 * key[] is sized for the largest case (3-key triple DES). */
72 struct atmel_tdes_ctx {
73 struct atmel_tdes_dev *dd;
76 u32 key[DES3_EDE_KEY_SIZE / sizeof(u32)];
/* Per-request context: requested mode flags plus a snapshot of the last
 * ciphertext block (lastc), needed for CBC IV chaining on in-place decrypt. */
82 struct atmel_tdes_reqctx {
84 u8 lastc[DES_BLOCK_SIZE];
/* One dmaengine channel and its slave configuration. */
87 struct atmel_tdes_dma {
88 struct dma_chan *chan;
89 struct dma_slave_config dma_conf;
/* Per-device state: MMIO mapping, software queue, tasklets, the request
 * currently on the hardware, scatterlist walk positions, and the two
 * bounce-buffer DMA mappings used when the request sgs are unaligned. */
92 struct atmel_tdes_dev {
93 struct list_head list;
94 unsigned long phys_base;
95 void __iomem *io_base;
97 struct atmel_tdes_ctx *ctx;
106 struct crypto_queue queue;
108 struct tasklet_struct done_task;
109 struct tasklet_struct queue_task;
111 struct skcipher_request *req;
114 struct scatterlist *in_sg;
115 unsigned int nb_in_sg;
117 struct scatterlist *out_sg;
118 unsigned int nb_out_sg;
126 dma_addr_t dma_addr_in;
127 struct atmel_tdes_dma dma_lch_in;
131 dma_addr_t dma_addr_out;
132 struct atmel_tdes_dma dma_lch_out;
134 struct atmel_tdes_caps caps;
/* Global driver state: list of probed devices, guarded by .lock. */
139 struct atmel_tdes_drv {
140 struct list_head dev_list;
144 static struct atmel_tdes_drv atmel_tdes = {
145 .dev_list = LIST_HEAD_INIT(atmel_tdes.dev_list),
146 .lock = __SPIN_LOCK_UNLOCKED(atmel_tdes.lock),
/*
 * Copy data between a scatterlist walk position and a linear bounce buffer.
 * Advances *sg/*offset as it consumes entries; copies at most @buflen bytes
 * and never more than @total. @out is passed to scatterwalk_map_and_copy()
 * (non-zero: write @buf into the scatterlist; zero: read into @buf).
 * Returns the number of bytes copied (return path not visible in this
 * extract — confirm against the full source).
 */
149 static int atmel_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
150 void *buf, size_t buflen, size_t total, int out)
152 size_t count, off = 0;
154 while (buflen && total) {
/* Clamp to what remains in the current sg entry, then to caller limits. */
155 count = min((*sg)->length - *offset, total);
156 count = min(count, buflen);
161 scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);
/* Current entry exhausted: step to the next scatterlist entry. */
168 if (*offset == (*sg)->length) {
/* MMIO register accessors. The relaxed variants impose no memory barrier;
 * ordering against DMA is handled by the callers/DMA API. */
180 static inline u32 atmel_tdes_read(struct atmel_tdes_dev *dd, u32 offset)
182 return readl_relaxed(dd->io_base + offset);
185 static inline void atmel_tdes_write(struct atmel_tdes_dev *dd,
186 u32 offset, u32 value)
188 writel_relaxed(value, dd->io_base + offset);
/* Write @count consecutive 32-bit words starting at @offset — used to load
 * multi-word key and IV register banks. */
191 static void atmel_tdes_write_n(struct atmel_tdes_dev *dd, u32 offset,
192 u32 *value, int count)
194 for (; count--; value++, offset += 4)
195 atmel_tdes_write(dd, offset, *value);
/*
 * Bind a tfm context to a TDES device. Walks the global device list under
 * atmel_tdes.lock; presumably returns ctx->dd if already bound, otherwise
 * the first registered device (selection logic not visible in this
 * extract — confirm against the full source).
 */
198 static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
200 struct atmel_tdes_dev *tdes_dd = NULL;
201 struct atmel_tdes_dev *tmp;
203 spin_lock_bh(&atmel_tdes.lock);
205 list_for_each_entry(tmp, &atmel_tdes.dev_list, list) {
213 spin_unlock_bh(&atmel_tdes.lock);
/*
 * Power up and (once per device lifetime) soft-reset the TDES block.
 * Enables the peripheral clock; the SWRST is only issued the first time,
 * gated by TDES_FLAGS_INIT.
 */
218 static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
222 err = clk_prepare_enable(dd->iclk);
226 if (!(dd->flags & TDES_FLAGS_INIT)) {
227 atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
228 dd->flags |= TDES_FLAGS_INIT;
/* Hardware version is the low 12 bits of TDES_HW_VERSION. */
235 static inline unsigned int atmel_tdes_get_version(struct atmel_tdes_dev *dd)
237 return atmel_tdes_read(dd, TDES_HW_VERSION) & 0x00000fff;
/* Probe-time helper: clock the block, cache the IP version for capability
 * detection, then gate the clock again. */
240 static void atmel_tdes_hw_version_init(struct atmel_tdes_dev *dd)
242 atmel_tdes_hw_init(dd);
244 dd->hw_version = atmel_tdes_get_version(dd);
247 "version: 0x%x\n", dd->hw_version);
249 clk_disable_unprepare(dd->iclk);
/* dmaengine completion callback for the output (device-to-memory) channel;
 * defers the rest of the completion work to the done tasklet. */
252 static void atmel_tdes_dma_callback(void *data)
254 struct atmel_tdes_dev *dd = data;
256 /* dma_lch_out - completed */
257 tasklet_schedule(&dd->done_task);
/*
 * Program the engine for the current request: CR, MR (key count, DES vs
 * TDES, operating mode, CFB segment size, direction), then the key and,
 * for chained modes, the IV. Hardware requires MR before the IV registers.
 */
260 static int atmel_tdes_write_ctrl(struct atmel_tdes_dev *dd)
263 u32 valcr = 0, valmr = TDES_MR_SMOD_PDC;
265 err = atmel_tdes_hw_init(dd);
/* No dmaengine support: make sure the PDC is quiescent before setup. */
270 if (!dd->caps.has_dma)
271 atmel_tdes_write(dd, TDES_PTCR,
272 TDES_PTCR_TXTDIS | TDES_PTCR_RXTDIS);
274 /* MR register must be set before IV registers */
/* Key length selects the algorithm: >2*DES = 3-key TDES, >1*DES = 2-key
 * TDES, otherwise single DES. */
275 if (dd->ctx->keylen > (DES_KEY_SIZE << 1)) {
276 valmr |= TDES_MR_KEYMOD_3KEY;
277 valmr |= TDES_MR_TDESMOD_TDES;
278 } else if (dd->ctx->keylen > DES_KEY_SIZE) {
279 valmr |= TDES_MR_KEYMOD_2KEY;
280 valmr |= TDES_MR_TDESMOD_TDES;
282 valmr |= TDES_MR_TDESMOD_DES;
285 if (dd->flags & TDES_FLAGS_CBC) {
286 valmr |= TDES_MR_OPMOD_CBC;
287 } else if (dd->flags & TDES_FLAGS_CFB) {
288 valmr |= TDES_MR_OPMOD_CFB;
290 if (dd->flags & TDES_FLAGS_CFB8)
291 valmr |= TDES_MR_CFBS_8b;
292 else if (dd->flags & TDES_FLAGS_CFB16)
293 valmr |= TDES_MR_CFBS_16b;
294 else if (dd->flags & TDES_FLAGS_CFB32)
295 valmr |= TDES_MR_CFBS_32b;
296 else if (dd->flags & TDES_FLAGS_CFB64)
297 valmr |= TDES_MR_CFBS_64b;
298 } else if (dd->flags & TDES_FLAGS_OFB) {
299 valmr |= TDES_MR_OPMOD_OFB;
/* OFB keystream is direction-independent, so always run the engine in
 * encrypt direction for OFB. */
302 if ((dd->flags & TDES_FLAGS_ENCRYPT) || (dd->flags & TDES_FLAGS_OFB))
303 valmr |= TDES_MR_CYPHER_ENC;
305 atmel_tdes_write(dd, TDES_CR, valcr);
306 atmel_tdes_write(dd, TDES_MR, valmr);
/* keylen is in bytes; the register bank takes 32-bit words. */
308 atmel_tdes_write_n(dd, TDES_KEY1W1R, dd->ctx->key,
309 dd->ctx->keylen >> 2);
/* 64-bit IV = two 32-bit words, only for the chained modes. */
311 if (((dd->flags & TDES_FLAGS_CBC) || (dd->flags & TDES_FLAGS_CFB) ||
312 (dd->flags & TDES_FLAGS_OFB)) && dd->req->iv) {
313 atmel_tdes_write_n(dd, TDES_IV1R, (void *)dd->req->iv, 2);
/*
 * Tear down a completed PDC transfer: disable both PDC directions, then
 * either unmap the caller's scatterlists (fast path) or sync the output
 * bounce buffer and copy it back into the destination scatterlist.
 */
319 static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
324 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
326 if (dd->flags & TDES_FLAGS_FAST) {
327 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
328 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
/* Slow path: CPU takes ownership of buf_out, then scatter it back. */
330 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
331 dd->dma_size, DMA_FROM_DEVICE);
334 count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
335 dd->buf_out, dd->buflen, dd->dma_size, 1);
336 if (count != dd->dma_size) {
338 pr_err("not all data converted: %zu\n", count);
/*
 * Allocate and DMA-map the two one-page bounce buffers used when request
 * scatterlists are not suitably aligned for direct DMA. buflen is rounded
 * down to a multiple of DES_BLOCK_SIZE. Unwinds mappings/pages on error
 * (goto-cleanup; labels not visible in this extract).
 */
345 static int atmel_tdes_buff_init(struct atmel_tdes_dev *dd)
349 dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
350 dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
351 dd->buflen = PAGE_SIZE;
352 dd->buflen &= ~(DES_BLOCK_SIZE - 1);
354 if (!dd->buf_in || !dd->buf_out) {
355 dev_err(dd->dev, "unable to alloc pages.\n");
/* Map bounce buffers once for the device's lifetime; per-transfer
 * ownership is handled with dma_sync_single_for_*(). */
360 dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
361 dd->buflen, DMA_TO_DEVICE);
362 if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
363 dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
368 dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
369 dd->buflen, DMA_FROM_DEVICE);
370 if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
371 dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
/* Error unwind: unmap the input mapping, free both pages. */
379 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
383 free_page((unsigned long)dd->buf_out);
384 free_page((unsigned long)dd->buf_in);
386 pr_err("error: %d\n", err);
/* Release the bounce-buffer DMA mappings and pages (inverse of
 * atmel_tdes_buff_init()). */
390 static void atmel_tdes_buff_cleanup(struct atmel_tdes_dev *dd)
392 dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
394 dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
396 free_page((unsigned long)dd->buf_out);
397 free_page((unsigned long)dd->buf_in);
/*
 * Kick off a transfer through the legacy PDC (peripheral DMA controller):
 * program transmit/receive pointers and counters, enable the end-of-RX
 * interrupt, then enable both PDC directions.
 */
400 static int atmel_tdes_crypt_pdc(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
401 dma_addr_t dma_addr_out, int length)
403 struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
404 struct atmel_tdes_dev *dd = ctx->dd;
407 dd->dma_size = length;
/* Slow path uses the pre-mapped bounce buffer: hand it to the device. */
409 if (!(dd->flags & TDES_FLAGS_FAST)) {
410 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
/* PDC counters are in data units, whose size follows the CFB segment
 * size (1 or 2 bytes) — otherwise 32-bit words. */
414 if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB8))
415 len32 = DIV_ROUND_UP(length, sizeof(u8));
416 else if ((dd->flags & TDES_FLAGS_CFB) && (dd->flags & TDES_FLAGS_CFB16))
417 len32 = DIV_ROUND_UP(length, sizeof(u16));
419 len32 = DIV_ROUND_UP(length, sizeof(u32));
421 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
422 atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
423 atmel_tdes_write(dd, TDES_TCR, len32);
424 atmel_tdes_write(dd, TDES_RPR, dma_addr_out);
425 atmel_tdes_write(dd, TDES_RCR, len32);
427 /* Enable Interrupt */
428 atmel_tdes_write(dd, TDES_IER, TDES_INT_ENDRX);
430 /* Start DMA transfer */
431 atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTEN | TDES_PTCR_RXTEN);
/*
 * Kick off a transfer through dmaengine: pick bus widths matching the CFB
 * segment size, reconfigure both slave channels, build one-entry
 * scatterlists around the already-mapped addresses and submit both
 * descriptors. Completion is signalled via the out-channel callback.
 */
436 static int atmel_tdes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in,
437 dma_addr_t dma_addr_out, int length)
439 struct atmel_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
440 struct atmel_tdes_dev *dd = ctx->dd;
441 struct scatterlist sg[2];
442 struct dma_async_tx_descriptor *in_desc, *out_desc;
444 dd->dma_size = length;
446 if (!(dd->flags & TDES_FLAGS_FAST)) {
447 dma_sync_single_for_device(dd->dev, dma_addr_in, length,
/* Bus width must match the engine's data unit: 1 byte for CFB8,
 * 2 bytes for CFB16, 4 bytes otherwise. */
451 if (dd->flags & TDES_FLAGS_CFB8) {
452 dd->dma_lch_in.dma_conf.dst_addr_width =
453 DMA_SLAVE_BUSWIDTH_1_BYTE;
454 dd->dma_lch_out.dma_conf.src_addr_width =
455 DMA_SLAVE_BUSWIDTH_1_BYTE;
456 } else if (dd->flags & TDES_FLAGS_CFB16) {
457 dd->dma_lch_in.dma_conf.dst_addr_width =
458 DMA_SLAVE_BUSWIDTH_2_BYTES;
459 dd->dma_lch_out.dma_conf.src_addr_width =
460 DMA_SLAVE_BUSWIDTH_2_BYTES;
462 dd->dma_lch_in.dma_conf.dst_addr_width =
463 DMA_SLAVE_BUSWIDTH_4_BYTES;
464 dd->dma_lch_out.dma_conf.src_addr_width =
465 DMA_SLAVE_BUSWIDTH_4_BYTES;
468 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
469 dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);
471 dd->flags |= TDES_FLAGS_DMA;
/* Wrap the raw DMA addresses in single-entry sg tables for prep_slave_sg. */
473 sg_init_table(&sg[0], 1);
474 sg_dma_address(&sg[0]) = dma_addr_in;
475 sg_dma_len(&sg[0]) = length;
477 sg_init_table(&sg[1], 1);
478 sg_dma_address(&sg[1]) = dma_addr_out;
479 sg_dma_len(&sg[1]) = length;
481 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
483 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
487 out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
489 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/* Only the output direction signals completion of the whole operation. */
493 out_desc->callback = atmel_tdes_dma_callback;
494 out_desc->callback_param = dd;
496 dmaengine_submit(out_desc);
497 dma_async_issue_pending(dd->dma_lch_out.chan);
499 dmaengine_submit(in_desc);
500 dma_async_issue_pending(dd->dma_lch_in.chan);
/*
 * Start processing the next chunk of the current request. If both current
 * scatterlist entries are word-aligned, block-sized and of equal DMA
 * length, map them directly ("fast" path); otherwise stage data through
 * the pre-mapped bounce buffers. Then dispatch via dmaengine or PDC
 * depending on capabilities.
 */
505 static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
507 struct crypto_tfm *tfm = crypto_skcipher_tfm(
508 crypto_skcipher_reqtfm(dd->req));
509 int err, fast = 0, in, out;
511 dma_addr_t addr_in, addr_out;
/* Fast path is only considered at the start of an sg entry on both sides. */
513 if ((!dd->in_offset) && (!dd->out_offset)) {
514 /* check for alignment */
515 in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
516 IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
517 out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
518 IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
521 if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
/* Transfer size: remaining total clamped to both sg entries. */
527 count = min_t(size_t, dd->total, sg_dma_len(dd->in_sg));
528 count = min_t(size_t, count, sg_dma_len(dd->out_sg));
530 err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
532 dev_err(dd->dev, "dma_map_sg() error\n");
536 err = dma_map_sg(dd->dev, dd->out_sg, 1,
539 dev_err(dd->dev, "dma_map_sg() error\n");
540 dma_unmap_sg(dd->dev, dd->in_sg, 1,
545 addr_in = sg_dma_address(dd->in_sg);
546 addr_out = sg_dma_address(dd->out_sg);
548 dd->flags |= TDES_FLAGS_FAST;
551 /* use cache buffers */
/* Slow path: gather source data into buf_in, run DMA on the bounce
 * buffer pair. */
552 count = atmel_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
553 dd->buf_in, dd->buflen, dd->total, 0);
555 addr_in = dd->dma_addr_in;
556 addr_out = dd->dma_addr_out;
558 dd->flags &= ~TDES_FLAGS_FAST;
563 if (dd->caps.has_dma)
564 err = atmel_tdes_crypt_dma(tfm, addr_in, addr_out, count);
566 err = atmel_tdes_crypt_pdc(tfm, addr_in, addr_out, count);
568 if (err && (dd->flags & TDES_FLAGS_FAST)) {
569 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
/* NOTE(review): out_sg is unmapped here with DMA_TO_DEVICE, but the
 * completion paths (crypt_pdc_stop/crypt_dma_stop) unmap it with
 * DMA_FROM_DEVICE — this direction mismatch looks wrong; confirm
 * against the mapping direction on the (elided) dma_map_sg() call. */
570 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
/*
 * After a request completes, store the last ciphertext block in req->iv so
 * the next request can chain (CBC semantics expected by the skcipher API).
 * Encrypt: take it from the end of dst. Decrypt: take it from src — or,
 * for in-place operation where src was already overwritten, from the
 * rctx->lastc snapshot captured in atmel_tdes_crypt().
 */
577 atmel_tdes_set_iv_as_last_ciphertext_block(struct atmel_tdes_dev *dd)
579 struct skcipher_request *req = dd->req;
580 struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
581 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
582 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
/* Nothing to chain if the request is shorter than one IV. */
584 if (req->cryptlen < ivsize)
587 if (rctx->mode & TDES_FLAGS_ENCRYPT) {
588 scatterwalk_map_and_copy(req->iv, req->dst,
589 req->cryptlen - ivsize, ivsize, 0);
591 if (req->src == req->dst)
592 memcpy(req->iv, rctx->lastc, ivsize);
594 scatterwalk_map_and_copy(req->iv, req->src,
595 req->cryptlen - ivsize,
/* Complete the current request: gate the clock, clear BUSY, propagate the
 * chaining IV, then invoke the request's completion callback with @err. */
600 static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
602 struct skcipher_request *req = dd->req;
604 clk_disable_unprepare(dd->iclk);
606 dd->flags &= ~TDES_FLAGS_BUSY;
608 atmel_tdes_set_iv_as_last_ciphertext_block(dd);
610 req->base.complete(&req->base, err);
/*
 * Enqueue @req (may be NULL when called from the tasklets just to pump the
 * queue) and, if the engine is idle, dequeue the next request and start it.
 * Runs the backlog notification, loads per-request state into dd, programs
 * the hardware and launches the first DMA chunk. On failure the request is
 * finished here and the queue tasklet is rescheduled.
 */
613 static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
614 struct skcipher_request *req)
616 struct crypto_async_request *async_req, *backlog;
617 struct atmel_tdes_ctx *ctx;
618 struct atmel_tdes_reqctx *rctx;
622 spin_lock_irqsave(&dd->lock, flags);
624 ret = crypto_enqueue_request(&dd->queue, &req->base);
/* Engine already busy: leave the request queued; the done path will
 * pump the queue again. */
625 if (dd->flags & TDES_FLAGS_BUSY) {
626 spin_unlock_irqrestore(&dd->lock, flags);
629 backlog = crypto_get_backlog(&dd->queue);
630 async_req = crypto_dequeue_request(&dd->queue);
632 dd->flags |= TDES_FLAGS_BUSY;
633 spin_unlock_irqrestore(&dd->lock, flags);
639 backlog->complete(backlog, -EINPROGRESS);
641 req = skcipher_request_cast(async_req);
643 /* assign new request to device */
645 dd->total = req->cryptlen;
647 dd->in_sg = req->src;
649 dd->out_sg = req->dst;
651 rctx = skcipher_request_ctx(req);
652 ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
/* Merge the request's mode bits into dd->flags, keeping state bits. */
653 rctx->mode &= TDES_FLAGS_MODE_MASK;
654 dd->flags = (dd->flags & ~TDES_FLAGS_MODE_MASK) | rctx->mode;
658 err = atmel_tdes_write_ctrl(dd);
660 err = atmel_tdes_crypt_start(dd);
662 /* des_task will not finish it, so do it here */
663 atmel_tdes_finish_req(dd, err);
664 tasklet_schedule(&dd->queue_task);
/*
 * Tear down a completed dmaengine transfer (dmaengine counterpart of
 * atmel_tdes_crypt_pdc_stop()): unmap the fast-path scatterlists or sync
 * and scatter the output bounce buffer back into the destination.
 */
670 static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
675 if (dd->flags & TDES_FLAGS_DMA) {
677 if (dd->flags & TDES_FLAGS_FAST) {
678 dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
679 dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
681 dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
682 dd->dma_size, DMA_FROM_DEVICE);
685 count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
686 dd->buf_out, dd->buflen, dd->dma_size, 1);
687 if (count != dd->dma_size) {
689 pr_err("not all data converted: %zu\n", count);
/*
 * Common entry point for all mode wrappers. Validates that cryptlen is a
 * whole number of data units for the selected mode, records the unit size
 * in ctx->block_size, snapshots the would-be IV for in-place decryption,
 * then hands the request to the queue. (@mode storage into rctx->mode is
 * in an elided line — confirm against the full source.)
 */
696 static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
698 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
699 struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(skcipher);
700 struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(req);
702 if (mode & TDES_FLAGS_CFB8) {
703 if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
704 pr_err("request size is not exact amount of CFB8 blocks\n");
707 ctx->block_size = CFB8_BLOCK_SIZE;
708 } else if (mode & TDES_FLAGS_CFB16) {
709 if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
710 pr_err("request size is not exact amount of CFB16 blocks\n");
713 ctx->block_size = CFB16_BLOCK_SIZE;
714 } else if (mode & TDES_FLAGS_CFB32) {
715 if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
716 pr_err("request size is not exact amount of CFB32 blocks\n");
719 ctx->block_size = CFB32_BLOCK_SIZE;
721 if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
722 pr_err("request size is not exact amount of DES blocks\n");
725 ctx->block_size = DES_BLOCK_SIZE;
/* In-place decrypt will overwrite src before the IV can be chained:
 * save the last ciphertext block now (consumed in
 * atmel_tdes_set_iv_as_last_ciphertext_block()). */
730 if (!(mode & TDES_FLAGS_ENCRYPT) && req->src == req->dst) {
731 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
733 if (req->cryptlen >= ivsize)
734 scatterwalk_map_and_copy(rctx->lastc, req->src,
735 req->cryptlen - ivsize,
739 return atmel_tdes_handle_queue(ctx->dd, req);
/* dmaengine channel filter for the non-DT path: accept only channels
 * belonging to the DMA controller named in the at_dma_slave data. */
742 static bool atmel_tdes_filter(struct dma_chan *chan, void *slave)
744 struct at_dma_slave *sl = slave;
746 if (sl && sl->dma_dev == chan->device->dev) {
/*
 * Acquire and configure the two slave DMA channels. Note the naming from
 * the device's point of view: the "in" channel feeds the engine (named
 * "tx", memory-to-device) and the "out" channel drains it (named "rx",
 * device-to-memory). On failure the in channel is released and -ENODEV is
 * presumably returned (label/return lines elided).
 */
754 static int atmel_tdes_dma_init(struct atmel_tdes_dev *dd,
755 struct crypto_platform_data *pdata)
760 dma_cap_set(DMA_SLAVE, mask);
762 /* Try to grab 2 DMA channels */
763 dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask,
764 atmel_tdes_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
765 if (!dd->dma_lch_in.chan)
768 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
769 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
771 dd->dma_lch_in.dma_conf.src_maxburst = 1;
772 dd->dma_lch_in.dma_conf.src_addr_width =
773 DMA_SLAVE_BUSWIDTH_4_BYTES;
774 dd->dma_lch_in.dma_conf.dst_maxburst = 1;
775 dd->dma_lch_in.dma_conf.dst_addr_width =
776 DMA_SLAVE_BUSWIDTH_4_BYTES;
777 dd->dma_lch_in.dma_conf.device_fc = false;
779 dd->dma_lch_out.chan = dma_request_slave_channel_compat(mask,
780 atmel_tdes_filter, &pdata->dma_slave->txdata, dd->dev, "rx");
781 if (!dd->dma_lch_out.chan)
784 dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
785 dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
787 dd->dma_lch_out.dma_conf.src_maxburst = 1;
788 dd->dma_lch_out.dma_conf.src_addr_width =
789 DMA_SLAVE_BUSWIDTH_4_BYTES;
790 dd->dma_lch_out.dma_conf.dst_maxburst = 1;
791 dd->dma_lch_out.dma_conf.dst_addr_width =
792 DMA_SLAVE_BUSWIDTH_4_BYTES;
793 dd->dma_lch_out.dma_conf.device_fc = false;
/* Error unwind: out channel failed, release the in channel. */
798 dma_release_channel(dd->dma_lch_in.chan);
800 dev_warn(dd->dev, "no DMA channel available\n");
/* Release both slave DMA channels (inverse of atmel_tdes_dma_init()). */
804 static void atmel_tdes_dma_cleanup(struct atmel_tdes_dev *dd)
806 dma_release_channel(dd->dma_lch_in.chan);
807 dma_release_channel(dd->dma_lch_out.chan);
/* Single-DES setkey: enforce the kernel's weak-key policy via
 * verify_skcipher_des_key(), then cache the key for write_ctrl(). */
810 static int atmel_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
813 struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
816 err = verify_skcipher_des_key(tfm, key);
820 memcpy(ctx->key, key, keylen);
821 ctx->keylen = keylen;
/* Triple-DES setkey: same pattern with the des3 key-parity/weakness check. */
826 static int atmel_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key,
829 struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
832 err = verify_skcipher_des3_key(tfm, key);
836 memcpy(ctx->key, key, keylen);
837 ctx->keylen = keylen;
/* Thin per-mode entry points: each just forwards to atmel_tdes_crypt()
 * with the matching TDES_FLAGS_* combination (ENCRYPT bit set/cleared for
 * the encrypt/decrypt variant). */
842 static int atmel_tdes_ecb_encrypt(struct skcipher_request *req)
844 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT);
847 static int atmel_tdes_ecb_decrypt(struct skcipher_request *req)
849 return atmel_tdes_crypt(req, 0);
852 static int atmel_tdes_cbc_encrypt(struct skcipher_request *req)
854 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CBC);
857 static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
859 return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
861 static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
863 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB);
866 static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
868 return atmel_tdes_crypt(req, TDES_FLAGS_CFB);
871 static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
873 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
877 static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
879 return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB8);
882 static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
884 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
888 static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
890 return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB16);
893 static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
895 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_CFB |
899 static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
901 return atmel_tdes_crypt(req, TDES_FLAGS_CFB | TDES_FLAGS_CFB32);
904 static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
906 return atmel_tdes_crypt(req, TDES_FLAGS_ENCRYPT | TDES_FLAGS_OFB);
909 static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
911 return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
/* skcipher .init: reserve per-request context space and bind the tfm to a
 * TDES device instance (ctx->dd assignment is in an elided line). */
914 static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
916 struct atmel_tdes_ctx *ctx = crypto_skcipher_ctx(tfm);
917 struct atmel_tdes_dev *dd;
919 crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_tdes_reqctx));
921 dd = atmel_tdes_find_dev(ctx);
/*
 * Algorithms registered with the crypto API. DES entries use
 * atmel_des_setkey (single-DES key checks), des3_ede entries use
 * atmel_tdes_setkey. cra_blocksize and cra_alignmask track the data unit
 * of each mode (e.g. 1 byte/no alignment for cfb8, 8 bytes/0x7 for the
 * block modes); all are ASYNC since completion happens from tasklets.
 */
928 static struct skcipher_alg tdes_algs[] = {
930 .base.cra_name = "ecb(des)",
931 .base.cra_driver_name = "atmel-ecb-des",
932 .base.cra_priority = 100,
933 .base.cra_flags = CRYPTO_ALG_ASYNC,
934 .base.cra_blocksize = DES_BLOCK_SIZE,
935 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
936 .base.cra_alignmask = 0x7,
937 .base.cra_module = THIS_MODULE,
939 .init = atmel_tdes_init_tfm,
940 .min_keysize = DES_KEY_SIZE,
941 .max_keysize = DES_KEY_SIZE,
942 .setkey = atmel_des_setkey,
943 .encrypt = atmel_tdes_ecb_encrypt,
944 .decrypt = atmel_tdes_ecb_decrypt,
947 .base.cra_name = "cbc(des)",
948 .base.cra_driver_name = "atmel-cbc-des",
949 .base.cra_priority = 100,
950 .base.cra_flags = CRYPTO_ALG_ASYNC,
951 .base.cra_blocksize = DES_BLOCK_SIZE,
952 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
953 .base.cra_alignmask = 0x7,
954 .base.cra_module = THIS_MODULE,
956 .init = atmel_tdes_init_tfm,
957 .min_keysize = DES_KEY_SIZE,
958 .max_keysize = DES_KEY_SIZE,
959 .ivsize = DES_BLOCK_SIZE,
960 .setkey = atmel_des_setkey,
961 .encrypt = atmel_tdes_cbc_encrypt,
962 .decrypt = atmel_tdes_cbc_decrypt,
965 .base.cra_name = "cfb(des)",
966 .base.cra_driver_name = "atmel-cfb-des",
967 .base.cra_priority = 100,
968 .base.cra_flags = CRYPTO_ALG_ASYNC,
969 .base.cra_blocksize = DES_BLOCK_SIZE,
970 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
971 .base.cra_alignmask = 0x7,
972 .base.cra_module = THIS_MODULE,
974 .init = atmel_tdes_init_tfm,
975 .min_keysize = DES_KEY_SIZE,
976 .max_keysize = DES_KEY_SIZE,
977 .ivsize = DES_BLOCK_SIZE,
978 .setkey = atmel_des_setkey,
979 .encrypt = atmel_tdes_cfb_encrypt,
980 .decrypt = atmel_tdes_cfb_decrypt,
983 .base.cra_name = "cfb8(des)",
984 .base.cra_driver_name = "atmel-cfb8-des",
985 .base.cra_priority = 100,
986 .base.cra_flags = CRYPTO_ALG_ASYNC,
987 .base.cra_blocksize = CFB8_BLOCK_SIZE,
988 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
989 .base.cra_alignmask = 0,
990 .base.cra_module = THIS_MODULE,
992 .init = atmel_tdes_init_tfm,
993 .min_keysize = DES_KEY_SIZE,
994 .max_keysize = DES_KEY_SIZE,
995 .ivsize = DES_BLOCK_SIZE,
996 .setkey = atmel_des_setkey,
997 .encrypt = atmel_tdes_cfb8_encrypt,
998 .decrypt = atmel_tdes_cfb8_decrypt,
1001 .base.cra_name = "cfb16(des)",
1002 .base.cra_driver_name = "atmel-cfb16-des",
1003 .base.cra_priority = 100,
1004 .base.cra_flags = CRYPTO_ALG_ASYNC,
1005 .base.cra_blocksize = CFB16_BLOCK_SIZE,
1006 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1007 .base.cra_alignmask = 0x1,
1008 .base.cra_module = THIS_MODULE,
1010 .init = atmel_tdes_init_tfm,
1011 .min_keysize = DES_KEY_SIZE,
1012 .max_keysize = DES_KEY_SIZE,
1013 .ivsize = DES_BLOCK_SIZE,
1014 .setkey = atmel_des_setkey,
1015 .encrypt = atmel_tdes_cfb16_encrypt,
1016 .decrypt = atmel_tdes_cfb16_decrypt,
1019 .base.cra_name = "cfb32(des)",
1020 .base.cra_driver_name = "atmel-cfb32-des",
1021 .base.cra_priority = 100,
1022 .base.cra_flags = CRYPTO_ALG_ASYNC,
1023 .base.cra_blocksize = CFB32_BLOCK_SIZE,
1024 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1025 .base.cra_alignmask = 0x3,
1026 .base.cra_module = THIS_MODULE,
1028 .init = atmel_tdes_init_tfm,
1029 .min_keysize = DES_KEY_SIZE,
1030 .max_keysize = DES_KEY_SIZE,
1031 .ivsize = DES_BLOCK_SIZE,
1032 .setkey = atmel_des_setkey,
1033 .encrypt = atmel_tdes_cfb32_encrypt,
1034 .decrypt = atmel_tdes_cfb32_decrypt,
1037 .base.cra_name = "ofb(des)",
1038 .base.cra_driver_name = "atmel-ofb-des",
1039 .base.cra_priority = 100,
1040 .base.cra_flags = CRYPTO_ALG_ASYNC,
1041 .base.cra_blocksize = DES_BLOCK_SIZE,
1042 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1043 .base.cra_alignmask = 0x7,
1044 .base.cra_module = THIS_MODULE,
1046 .init = atmel_tdes_init_tfm,
1047 .min_keysize = DES_KEY_SIZE,
1048 .max_keysize = DES_KEY_SIZE,
1049 .ivsize = DES_BLOCK_SIZE,
1050 .setkey = atmel_des_setkey,
1051 .encrypt = atmel_tdes_ofb_encrypt,
1052 .decrypt = atmel_tdes_ofb_decrypt,
1055 .base.cra_name = "ecb(des3_ede)",
1056 .base.cra_driver_name = "atmel-ecb-tdes",
1057 .base.cra_priority = 100,
1058 .base.cra_flags = CRYPTO_ALG_ASYNC,
1059 .base.cra_blocksize = DES_BLOCK_SIZE,
1060 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1061 .base.cra_alignmask = 0x7,
1062 .base.cra_module = THIS_MODULE,
1064 .init = atmel_tdes_init_tfm,
1065 .min_keysize = DES3_EDE_KEY_SIZE,
1066 .max_keysize = DES3_EDE_KEY_SIZE,
1067 .setkey = atmel_tdes_setkey,
1068 .encrypt = atmel_tdes_ecb_encrypt,
1069 .decrypt = atmel_tdes_ecb_decrypt,
1072 .base.cra_name = "cbc(des3_ede)",
1073 .base.cra_driver_name = "atmel-cbc-tdes",
1074 .base.cra_priority = 100,
1075 .base.cra_flags = CRYPTO_ALG_ASYNC,
1076 .base.cra_blocksize = DES_BLOCK_SIZE,
1077 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1078 .base.cra_alignmask = 0x7,
1079 .base.cra_module = THIS_MODULE,
1081 .init = atmel_tdes_init_tfm,
1082 .min_keysize = DES3_EDE_KEY_SIZE,
1083 .max_keysize = DES3_EDE_KEY_SIZE,
1084 .setkey = atmel_tdes_setkey,
1085 .encrypt = atmel_tdes_cbc_encrypt,
1086 .decrypt = atmel_tdes_cbc_decrypt,
1087 .ivsize = DES_BLOCK_SIZE,
1090 .base.cra_name = "ofb(des3_ede)",
1091 .base.cra_driver_name = "atmel-ofb-tdes",
1092 .base.cra_priority = 100,
1093 .base.cra_flags = CRYPTO_ALG_ASYNC,
1094 .base.cra_blocksize = DES_BLOCK_SIZE,
1095 .base.cra_ctxsize = sizeof(struct atmel_tdes_ctx),
1096 .base.cra_alignmask = 0x7,
1097 .base.cra_module = THIS_MODULE,
1099 .init = atmel_tdes_init_tfm,
1100 .min_keysize = DES3_EDE_KEY_SIZE,
1101 .max_keysize = DES3_EDE_KEY_SIZE,
1102 .setkey = atmel_tdes_setkey,
1103 .encrypt = atmel_tdes_ofb_encrypt,
1104 .decrypt = atmel_tdes_ofb_decrypt,
1105 .ivsize = DES_BLOCK_SIZE,
/* Queue tasklet: just pump the software queue again (no new request). */
1109 static void atmel_tdes_queue_task(unsigned long data)
1111 struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *)data;
1113 atmel_tdes_handle_queue(dd, NULL);
/*
 * Done tasklet: runs after the IRQ/DMA callback. Tears down the finished
 * transfer (PDC or dmaengine variant), and if the request still has data
 * remaining (dd->total), advances the scatterlists on the fast path and
 * starts the next chunk. Otherwise finishes the request and pumps the
 * queue for the next one.
 */
1116 static void atmel_tdes_done_task(unsigned long data)
1118 struct atmel_tdes_dev *dd = (struct atmel_tdes_dev *) data;
1121 if (!(dd->flags & TDES_FLAGS_DMA))
1122 err = atmel_tdes_crypt_pdc_stop(dd);
1124 err = atmel_tdes_crypt_dma_stop(dd);
/* Prefer a previously recorded async error over the stop result. */
1126 err = dd->err ? : err;
1128 if (dd->total && !err) {
1129 if (dd->flags & TDES_FLAGS_FAST) {
1130 dd->in_sg = sg_next(dd->in_sg);
1131 dd->out_sg = sg_next(dd->out_sg);
1132 if (!dd->in_sg || !dd->out_sg)
1136 err = atmel_tdes_crypt_start(dd);
1138 return; /* DMA started. Not fininishing. */
1141 atmel_tdes_finish_req(dd, err);
1142 atmel_tdes_handle_queue(dd, NULL);
/*
 * Interrupt handler: acknowledge only the enabled+pending bits (mask them
 * via IDR) and defer the real work to the done tasklet — but only when a
 * request is actually in flight; otherwise warn about a spurious IRQ.
 */
1145 static irqreturn_t atmel_tdes_irq(int irq, void *dev_id)
1147 struct atmel_tdes_dev *tdes_dd = dev_id;
1150 reg = atmel_tdes_read(tdes_dd, TDES_ISR);
1151 if (reg & atmel_tdes_read(tdes_dd, TDES_IMR)) {
1152 atmel_tdes_write(tdes_dd, TDES_IDR, reg);
1153 if (TDES_FLAGS_BUSY & tdes_dd->flags)
1154 tasklet_schedule(&tdes_dd->done_task);
1156 dev_warn(tdes_dd->dev, "TDES interrupt when no active requests.\n");
/* Unregister every algorithm in tdes_algs. */
1163 static void atmel_tdes_unregister_algs(struct atmel_tdes_dev *dd)
1167 for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
1168 crypto_unregister_skcipher(&tdes_algs[i]);
/* Register all algorithms; on failure roll back the ones already
 * registered (indices [0, i)). */
1171 static int atmel_tdes_register_algs(struct atmel_tdes_dev *dd)
1175 for (i = 0; i < ARRAY_SIZE(tdes_algs); i++) {
1176 err = crypto_register_skcipher(&tdes_algs[i]);
1184 for (j = 0; j < i; j++)
1185 crypto_unregister_skcipher(&tdes_algs[j]);
/*
 * Derive capabilities from the cached IP major version (bits 8-11 of
 * hw_version). Newer revisions gain dmaengine support and 3-key CFB;
 * unknown versions fall back to the minimal (PDC-only) feature set.
 */
1190 static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
1193 dd->caps.has_dma = 0;
1194 dd->caps.has_cfb_3keys = 0;
1196 /* keep only major version number */
1197 switch (dd->hw_version & 0xf00) {
1199 dd->caps.has_dma = 1;
1200 dd->caps.has_cfb_3keys = 1;
1206 "Unmanaged tdes version, set minimum capabilities\n");
/* Device-tree support: match table plus a helper that builds an empty
 * crypto_platform_data (DMA channels are then resolved by name through
 * dma_request_slave_channel_compat()). Stubbed to -EINVAL without OF. */
1211 #if defined(CONFIG_OF)
1212 static const struct of_device_id atmel_tdes_dt_ids[] = {
1213 { .compatible = "atmel,at91sam9g46-tdes" },
1216 MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
1218 static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
1220 struct device_node *np = pdev->dev.of_node;
1221 struct crypto_platform_data *pdata;
1224 dev_err(&pdev->dev, "device node not found\n");
1225 return ERR_PTR(-EINVAL);
/* devm_* allocations: freed automatically on probe failure/unbind. */
1228 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1230 return ERR_PTR(-ENOMEM);
1232 pdata->dma_slave = devm_kzalloc(&pdev->dev,
1233 sizeof(*(pdata->dma_slave)),
1235 if (!pdata->dma_slave)
1236 return ERR_PTR(-ENOMEM);
1240 #else /* CONFIG_OF */
1241 static inline struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *pdev)
1243 return ERR_PTR(-EINVAL);
/*
 * Platform probe: allocate device state, set up tasklets and the request
 * queue, map MMIO, hook the IRQ, get the clock, detect the IP version and
 * capabilities, allocate bounce buffers, optionally bring up DMA, add the
 * device to the global list and finally register the algorithms. Error
 * paths unwind in reverse order (goto labels elided in this extract).
 */
1247 static int atmel_tdes_probe(struct platform_device *pdev)
1249 struct atmel_tdes_dev *tdes_dd;
1250 struct crypto_platform_data *pdata;
1251 struct device *dev = &pdev->dev;
1252 struct resource *tdes_res;
1255 tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
1256 if (tdes_dd == NULL) {
1263 platform_set_drvdata(pdev, tdes_dd);
1265 INIT_LIST_HEAD(&tdes_dd->list);
1266 spin_lock_init(&tdes_dd->lock);
1268 tasklet_init(&tdes_dd->done_task, atmel_tdes_done_task,
1269 (unsigned long)tdes_dd);
1270 tasklet_init(&tdes_dd->queue_task, atmel_tdes_queue_task,
1271 (unsigned long)tdes_dd);
1273 crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
1275 /* Get the base address */
1276 tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1278 dev_err(dev, "no MEM resource info\n");
1282 tdes_dd->phys_base = tdes_res->start;
1285 tdes_dd->irq = platform_get_irq(pdev, 0);
1286 if (tdes_dd->irq < 0) {
/* Shared IRQ: the handler checks ISR & IMR before acting. */
1291 err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
1292 IRQF_SHARED, "atmel-tdes", tdes_dd);
1294 dev_err(dev, "unable to request tdes irq.\n");
1298 /* Initializing the clock */
1299 tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
1300 if (IS_ERR(tdes_dd->iclk)) {
1301 dev_err(dev, "clock initialization failed.\n");
1302 err = PTR_ERR(tdes_dd->iclk);
1306 tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
1307 if (IS_ERR(tdes_dd->io_base)) {
1308 dev_err(dev, "can't ioremap\n");
1309 err = PTR_ERR(tdes_dd->io_base);
1313 atmel_tdes_hw_version_init(tdes_dd);
1315 atmel_tdes_get_cap(tdes_dd);
1317 err = atmel_tdes_buff_init(tdes_dd);
/* DMA setup only if the IP revision supports it; pdata comes from
 * board platform data or, failing that, from the device tree. */
1321 if (tdes_dd->caps.has_dma) {
1322 pdata = pdev->dev.platform_data;
1324 pdata = atmel_tdes_of_init(pdev);
1325 if (IS_ERR(pdata)) {
1326 dev_err(&pdev->dev, "platform data not available\n");
1327 err = PTR_ERR(pdata);
1331 if (!pdata->dma_slave) {
1335 err = atmel_tdes_dma_init(tdes_dd, pdata);
1339 dev_info(dev, "using %s, %s for DMA transfers\n",
1340 dma_chan_name(tdes_dd->dma_lch_in.chan),
1341 dma_chan_name(tdes_dd->dma_lch_out.chan));
1344 spin_lock(&atmel_tdes.lock);
1345 list_add_tail(&tdes_dd->list, &atmel_tdes.dev_list);
1346 spin_unlock(&atmel_tdes.lock);
1348 err = atmel_tdes_register_algs(tdes_dd);
1352 dev_info(dev, "Atmel DES/TDES\n");
/* Error unwind: delist, release DMA, free buffers, kill tasklets. */
1357 spin_lock(&atmel_tdes.lock);
1358 list_del(&tdes_dd->list);
1359 spin_unlock(&atmel_tdes.lock);
1360 if (tdes_dd->caps.has_dma)
1361 atmel_tdes_dma_cleanup(tdes_dd);
1364 atmel_tdes_buff_cleanup(tdes_dd);
1367 tasklet_kill(&tdes_dd->done_task);
1368 tasklet_kill(&tdes_dd->queue_task);
1370 dev_err(dev, "initialization failed.\n");
/*
 * Platform remove: delist the device, unregister the algorithms, stop the
 * tasklets, and release DMA channels and bounce buffers (MMIO, IRQ and
 * clock are devm-managed and released automatically).
 */
1375 static int atmel_tdes_remove(struct platform_device *pdev)
1377 struct atmel_tdes_dev *tdes_dd;
1379 tdes_dd = platform_get_drvdata(pdev);
1382 spin_lock(&atmel_tdes.lock);
1383 list_del(&tdes_dd->list);
1384 spin_unlock(&atmel_tdes.lock);
1386 atmel_tdes_unregister_algs(tdes_dd);
1388 tasklet_kill(&tdes_dd->done_task);
1389 tasklet_kill(&tdes_dd->queue_task);
1391 if (tdes_dd->caps.has_dma)
1392 atmel_tdes_dma_cleanup(tdes_dd);
1394 atmel_tdes_buff_cleanup(tdes_dd);
/* Platform driver glue; of_match_ptr() compiles the DT table out when
 * CONFIG_OF is disabled. */
1399 static struct platform_driver atmel_tdes_driver = {
1400 .probe = atmel_tdes_probe,
1401 .remove = atmel_tdes_remove,
1403 .name = "atmel_tdes",
1404 .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
1408 module_platform_driver(atmel_tdes_driver);
1410 MODULE_DESCRIPTION("Atmel DES/TDES hw acceleration support.");
1411 MODULE_LICENSE("GPL v2");
1412 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");