// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
/* SHA initial context values */
static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4),
};

static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
};

static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
};
#define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
					ccp_gen_jobid(ccp) : 0)

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
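/*
 * Note (illustrative, not part of the original source): only version 3
 * devices need driver-generated job ids, so CCP_NEW_JOBID() yields 0 on
 * later devices, which track jobs themselves. The generator is a simple
 * wrapping counter, e.g.
 *
 *	u32 id = CCP_NEW_JOBID(ccp);	// 1, 2, ... masked by CCP_JOBID_MASK
 */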
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL,
					      &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (dma_mapping_error(wa->dev, wa->dma.address))
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			   struct scatterlist *sg, unsigned int sg_offset,
			   unsigned int len)
{
	WARN_ON(!wa->address);

	if (len > (wa->length - wa_offset))
		return -EINVAL;

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
	return 0;
}
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   unsigned int wa_offset,
				   struct scatterlist *sg,
				   unsigned int sg_offset,
				   unsigned int len)
{
	u8 *p, *q;
	int rc;

	rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
	if (rc)
		return rc;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}
	return 0;
}
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    unsigned int wa_offset,
				    struct scatterlist *sg,
				    unsigned int sg_offset,
				    unsigned int len)
{
	u8 *p, *q;

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		*p = *p ^ *q;
		*q = *p ^ *q;
		*p = *p ^ *q;
		p++;
		q--;
	}

	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
}
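/*
 * Worked example (illustrative, not from the original source): for the
 * big-endian operand 01 02 03 04, ccp_reverse_set_dm_area() leaves
 * 04 03 02 01 in the work area via in-place XOR swaps, giving the engine
 * the little-endian ordering it expects; ccp_reverse_get_dm_area() applies
 * the same reversal when copying a result back out to the scatterlist.
 */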
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}
static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
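/*
 * Usage note (illustrative, not from the original source): these helpers
 * bounce data between the caller's scatterlist and the contiguous DMA
 * buffer. ccp_fill_queue_buf() copies scatterlist bytes into the buffer
 * (from == 0, so the buffer is zeroed first, which also provides block
 * padding), while ccp_empty_queue_buf() drains engine output back out to
 * the scatterlist (from == 1).
 */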
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
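/*
 * Decision sketch (illustrative, not from the original source): with
 * block_size = 16 and a source element holding only 10 unconsumed bytes,
 * the data is staged through the bounce buffer and op->soc is set so the
 * engine stalls until the buffered block completes; with 40 bytes
 * available, the engine DMAs 32 bytes (op_len rounded down to a block
 * multiple) straight from the mapped scatterlist.
 */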
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,
			       struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			       u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_SB;
		op.src.u.sb = sb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_SB;
		op.dst.u.sb = sb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return cmd_q->ccp->vdata->perform->passthru(&op);
}

static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q,
			  struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			  u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false);
}

static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
			    struct ccp_dm_workarea *wa, u32 jobid, u32 sb,
			    u32 byte_swap)
{
	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
}
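/*
 * Usage note (illustrative, not from the original source): keys and
 * contexts move between system memory and a local storage block (SB)
 * entry with a passthru operation, e.g.
 *
 *	ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
 *		       CCP_PASSTHRU_BYTESWAP_256BIT);
 *
 * where the byte-swap argument converts a big-endian buffer to the
 * little-endian layout the engine requires (or NOOP when none is needed).
 */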
static noinline_for_stack int
ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid,
					       op.sb_ctx,
					       CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					      aes->cmac_key_len);
			if (ret)
				goto e_src;
			ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
					     CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
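/*
 * Example caller setup (a minimal sketch, not part of this file; field
 * names follow struct ccp_cmd / struct ccp_aes_engine in <linux/ccp.h>):
 *
 *	struct ccp_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.engine = CCP_ENGINE_AES;
 *	cmd.u.aes.type = CCP_AES_TYPE_128;
 *	cmd.u.aes.mode = CCP_AES_MODE_CMAC;
 *	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
 *	cmd.u.aes.key = &key_sg;		(16-, 24- or 32-byte key)
 *	cmd.u.aes.key_len = AES_KEYSIZE_128;
 *	cmd.u.aes.iv = &iv_sg;			(running MAC value)
 *	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
 *	cmd.u.aes.src = &msg_sg;		(multiple of AES_BLOCK_SIZE)
 *	cmd.u.aes.src_len = msg_len;
 *	cmd.u.aes.cmac_final = 1;		(last chunk: also set)
 *	cmd.u.aes.cmac_key = &subkey_sg;	(K1 or K2 subkey)
 *	cmd.u.aes.cmac_key_len = AES_BLOCK_SIZE;
 */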
static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx, final_wa, tag;
	struct ccp_data src, dst;
	struct ccp_data aad;
	struct ccp_op op;

	__be64 *final;
	unsigned int dm_offset;
	unsigned int ilen;
	unsigned int jobid;
	bool in_place = true; /* Default value */
	int ret;

	struct scatterlist *p_inp, sg_inp[2];
	struct scatterlist *p_tag, sg_tag[2];
	struct scatterlist *p_outp, sg_outp[2];
	struct scatterlist *p_aad;

	if (!aes->iv)
		return -EINVAL;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (!aes->key) /* Gotta have a key SGL */
		return -EINVAL;

	/* First, decompose the source buffer into AAD & PT,
	 * and the destination buffer into AAD, CT & tag, or
	 * the input into CT & tag.
	 * It is expected that the input and output SGs will
	 * be valid, even if the AAD and input lengths are 0.
	 */
	p_aad = aes->src;
	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		ilen = aes->src_len;
		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
	} else {
		/* Input length for decryption includes tag */
		ilen = aes->src_len - AES_BLOCK_SIZE;
		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
	}

	jobid = CCP_NEW_JOBID(cmd_q->ccp);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;

	/* Copy the key to the LSB */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* Copy the context (IV) to the LSB.
	 * There is an assumption here that the IV is 96 bits in length, plus
	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_ctx;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	op.init = 1;
	if (aes->aad_len > 0) {
		/* Step 1: Run a GHASH over the Additional Authenticated Data */
		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
				    AES_BLOCK_SIZE,
				    DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		op.u.aes.mode = CCP_AES_MODE_GHASH;
		op.u.aes.action = CCP_AES_GHASHAAD;

		while (aad.sg_wa.bytes_left) {
			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_aad;
			}

			ccp_process_data(&aad, NULL, &op);
		}
	}

	op.u.aes.mode = CCP_AES_MODE_GCTR;
	op.u.aes.action = aes->action;

	if (ilen > 0) {
		/* Step 2: Run a GCTR over the plaintext */
		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;

		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
				    AES_BLOCK_SIZE,
				    in_place ? DMA_BIDIRECTIONAL
					     : DMA_TO_DEVICE);
		if (ret)
			goto e_aad;

		if (in_place) {
			dst = src;
		} else {
			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
			if (ret)
				goto e_src;
		}

		op.soc = 0;
		op.eom = 0;
		op.init = 1;
		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
			if (!src.sg_wa.bytes_left) {
				unsigned int nbytes = aes->src_len
						      % AES_BLOCK_SIZE;

				if (nbytes) {
					op.eom = 1;
					op.u.aes.size = (nbytes * 8) - 1;
				}
			}

			ret = cmd_q->ccp->vdata->perform->aes(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_dst;
			}

			ccp_process_data(&src, &dst, &op);
		}
	}

	/* Step 3: Update the IV portion of the context with the original IV */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	if (ret)
		goto e_dst;

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* Step 4: Concatenate the lengths of the AAD and source, and
	 * hash that 16 byte buffer.
	 */
	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_dst;
	final = (__be64 *)final_wa.address;
	final[0] = cpu_to_be64(aes->aad_len * 8);
	final[1] = cpu_to_be64(ilen * 8);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = CCP_AES_MODE_GHASH;
	op.u.aes.action = CCP_AES_GHASHFINAL;
	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = final_wa.dma.address;
	op.src.u.dma.length = AES_BLOCK_SIZE;
	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = final_wa.dma.address;
	op.dst.u.dma.length = AES_BLOCK_SIZE;
	op.eom = 1;

	ret = cmd_q->ccp->vdata->perform->aes(&op);
	if (ret)
		goto e_tag;

	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
		/* Put the ciphered tag after the ciphertext. */
		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
	} else {
		/* Does this ciphered tag match the input? */
		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_tag;
		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
		if (ret)
			goto e_tag;

		ret = crypto_memneq(tag.address, final_wa.address,
				    AES_BLOCK_SIZE) ? -EBADMSG : 0;
		ccp_dm_free(&tag);
	}

e_tag:
	ccp_dm_free(&final_wa);

e_dst:
	if (aes->src_len && !in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	if (aes->src_len)
		ccp_free_data(&src, cmd_q);

e_aad:
	if (aes->aad_len)
		ccp_free_data(&aad, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
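/*
 * Layout illustration (not from the original source): for an encrypt with
 * aad_len = 16 and src_len = 32, scatterwalk_ffwd() splits the SGLs as
 *
 *	src: [ AAD (16) | plaintext (32) ]             -> p_aad, p_inp
 *	dst: [ AAD (16) | ciphertext (32) | tag (16) ] -> p_outp, p_tag
 *
 * and the lengths buffer hashed in step 4 holds the bit counts
 * be64(aad_len * 8) || be64(ilen * 8).
 */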
static noinline_for_stack int
ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) SB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_SB_BYTES - aes->key_len;
	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	if (ret)
		goto e_key;
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		if (ret)
			goto e_ctx;
		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}
	switch (aes->mode) {
	case CCP_AES_MODE_CFB: /* CFB128 only */
	case CCP_AES_MODE_CTR:
		op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1;
		break;
	default:
		op.u.aes.size = 0;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = cmd_q->ccp->vdata->perform->aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
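/*
 * Note (illustrative, not from the original source): for modes that carry
 * state (CBC, CTR, CFB) the final IV/counter is read back from the SB
 * entry and returned through aes->iv, so a caller can chain a long message
 * across several ccp_cmd submissions; ECB carries no state, which is why
 * both the context load and this read-back are skipped for it.
 */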
static noinline_for_stack int
ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	unsigned int sb_count;
	enum ccp_aes_type aestype;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len == AES_KEYSIZE_128)
		aestype = CCP_AES_TYPE_128;
	else if (xts->key_len == AES_KEYSIZE_256)
		aestype = CCP_AES_TYPE_256;
	else
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;
	op.sb_ctx = cmd_q->sb_ctx;
	op.init = 1;
	op.u.xts.type = aestype;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* A version 3 device only supports 128-bit keys, which fits into a
	 * single SB entry. A version 5 device uses a 512-bit vector, so two
	 * SB entries.
	 */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
		sb_count = CCP_XTS_AES_KEY_SB_COUNT;
	else
		sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   sb_count * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
		/* All supported key sizes must be in little endian format.
		 * Use the 256-bit byte swap passthru option to convert from
		 * big endian to little endian.
		 */
		dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
		ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	} else {
		/* Version 5 CCPs use a 512-bit space for the key: each portion
		 * occupies 256 bits, or one entire slot, and is zero-padded.
		 */
		unsigned int pad;

		dm_offset = CCP_SB_BYTES;
		pad = dm_offset - xts->key_len;
		ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
		if (ret)
			goto e_key;
		ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
				      xts->key_len, xts->key_len);
		if (ret)
			goto e_key;
	}
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) SB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	if (ret)
		goto e_ctx;
	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = cmd_q->ccp->vdata->perform->xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
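/*
 * Key layout illustration (not from the original source): with a version 5
 * device and a 128-bit XTS key pair, each half is zero-padded to a full
 * 32-byte slot:
 *
 *	slot 0: [ 16 zero bytes | AES key (16) ]
 *	slot 1: [ 16 zero bytes | tweak key (16) ]
 *
 * whereas a version 3 device packs both halves into the one slot it
 * supports, with the tweak key at offset 0 and the AES key above it.
 */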
static noinline_for_stack int
ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_des3_engine *des3 = &cmd->u.des3;

	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	unsigned int len_singlekey;
	bool in_place = false;
	int ret;

	/* Error checks */
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
		return -EINVAL;

	if (!cmd_q->ccp->vdata->perform->des3)
		return -EINVAL;

	if (des3->key_len != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!des3->key || !des3->src || !des3->dst)
		return -EINVAL;

	if (des3->mode != CCP_DES3_MODE_ECB) {
		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
			return -EINVAL;

		if (!des3->iv)
			return -EINVAL;
	}

	/* Zero out all the fields of the command desc */
	memset(&op, 0, sizeof(op));

	/* Set up the Function field */
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_key = cmd_q->sb_key;

	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
	op.u.des3.type = des3->type;
	op.u.des3.mode = des3->mode;
	op.u.des3.action = des3->action;

	/*
	 * All supported key sizes fit in a single (32-byte) KSB entry and
	 * (like AES) must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/*
	 * The contents of the key triplet are in the reverse order of what
	 * is required by the engine. Copy the 3 pieces individually to put
	 * them where they belong.
	 */
	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */

	len_singlekey = des3->key_len / 3;
	ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
			      des3->key, 0, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
			      des3->key, len_singlekey, len_singlekey);
	if (ret)
		goto e_key;
	ret = ccp_set_dm_area(&key, dm_offset,
			      des3->key, 2 * len_singlekey, len_singlekey);
	if (ret)
		goto e_key;

	/* Copy the key to the SB */
	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/*
	 * The DES3 context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	if (des3->mode != CCP_DES3_MODE_ECB) {
		op.sb_ctx = cmd_q->sb_ctx;

		ret = ccp_init_dm_workarea(&ctx, cmd_q,
					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
					   DMA_BIDIRECTIONAL);
		if (ret)
			goto e_key;

		/* Load the context into the LSB */
		dm_offset = CCP_SB_BYTES - des3->iv_len;
		ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
				      des3->iv_len);
		if (ret)
			goto e_ctx;

		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				     CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/*
	 * Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(des3->src) == sg_virt(des3->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
			    DES3_EDE_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
				    DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP DES3 engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the context in ECB mode
			 * we have to wait for the operation to complete
			 * on the last piece of data
			 */
			op.soc = 0;
		}

		ret = cmd_q->ccp->vdata->perform->des3(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (des3->mode != CCP_DES3_MODE_ECB) {
		/* Retrieve the context and make BE */
		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
				       CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	if (des3->mode != CCP_DES3_MODE_ECB)
		ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
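/*
 * Worked example (illustrative, not from the original source): a caller
 * passes the triplet K1 || K2 || K3 (8 bytes each). The three copies above
 * place them in the work area as K3 || K2 || K1 ahead of the 256-bit byte
 * swap, which is the ordering the engine expects.
 */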
static noinline_for_stack int
ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int ioffset, ooffset;
	unsigned int digest_size;
	int sb_count;
	const void *init;
	u64 block_size;
	int ctx_size;
	int ret;

	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		if (sha->ctx_len < SHA1_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA1_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_224:
		if (sha->ctx_len < SHA224_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA224_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_256:
		if (sha->ctx_len < SHA256_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA256_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)
		    || sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (block_size - 1)))
		return -EINVAL;

	/* The version 3 device can't handle zero-length input */
	if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {

		if (!sha->src_len) {
			unsigned int digest_len;
			const u8 *sha_zero;

			/* Not final, just return */
			if (!sha->final)
				return 0;

			/* CCP can't do a zero length sha operation so the
			 * caller must buffer the data.
			 */
			if (sha->msg_bits)
				return -EINVAL;

			/* The CCP cannot perform zero-length sha operations
			 * so the caller is required to buffer data for the
			 * final operation. However, a sha operation for a
			 * message with a total length of zero is valid so
			 * known values are required to supply the result.
			 */
			switch (sha->type) {
			case CCP_SHA_TYPE_1:
				sha_zero = sha1_zero_message_hash;
				digest_len = SHA1_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_224:
				sha_zero = sha224_zero_message_hash;
				digest_len = SHA224_DIGEST_SIZE;
				break;
			case CCP_SHA_TYPE_256:
				sha_zero = sha256_zero_message_hash;
				digest_len = SHA256_DIGEST_SIZE;
				break;
			default:
				return -EINVAL;
			}

			scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
						 digest_len, 1);

			return 0;
		}
	}

	/* Set variables used throughout */
	switch (sha->type) {
	case CCP_SHA_TYPE_1:
		digest_size = SHA1_DIGEST_SIZE;
		init = (void *) ccp_sha1_init;
		ctx_size = SHA1_DIGEST_SIZE;
		sb_count = 1;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
		else
			ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_224:
		digest_size = SHA224_DIGEST_SIZE;
		init = (void *) ccp_sha224_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ioffset = 0;
		if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0))
			ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
		else
			ooffset = 0;
		break;
	case CCP_SHA_TYPE_256:
		digest_size = SHA256_DIGEST_SIZE;
		init = (void *) ccp_sha256_init;
		ctx_size = SHA256_DIGEST_SIZE;
		sb_count = 1;
		ooffset = ioffset = 0;
		break;
	case CCP_SHA_TYPE_384:
		digest_size = SHA384_DIGEST_SIZE;
		init = (void *) ccp_sha384_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ioffset = 0;
		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		digest_size = SHA512_DIGEST_SIZE;
		init = (void *) ccp_sha512_init;
		ctx_size = SHA512_DIGEST_SIZE;
		sb_count = 2;
		ooffset = ioffset = 0;
		break;
	default:
		ret = -EINVAL;
		goto e_data;
	}

	/* For zero-length plaintext the src pointer is ignored;
	 * otherwise both parts must be valid
	 */
	if (sha->src_len && !sha->src)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
	 * first slot, and the left half in the second. Each portion must then
	 * be in little endian format: use the 256-bit byte swap option.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;
	if (sha->first) {
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(ctx.address + ioffset, init, ctx_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(ctx.address + ctx_size / 2, init,
			       ctx_size / 2);
			memcpy(ctx.address, init + ctx_size / 2,
			       ctx_size / 2);
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
	} else {
		/* Restore the context */
		ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
				      sb_count * CCP_SB_BYTES);
		if (ret)
			goto e_ctx;
	}

	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			     CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	if (sha->src) {
		/* Send data to the CCP SHA engine; block_size is set above */
		ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
				    block_size, DMA_TO_DEVICE);
		if (ret)
			goto e_ctx;

		while (src.sg_wa.bytes_left) {
			ccp_prepare_data(&src, NULL, &op, block_size, false);
			if (sha->final && !src.sg_wa.bytes_left)
				op.eom = 1;

			ret = cmd_q->ccp->vdata->perform->sha(&op);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_data;
			}

			ccp_process_data(&src, NULL, &op);
		}
	} else {
		op.eom = 1;
		ret = cmd_q->ccp->vdata->perform->sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping to BE
	 */
	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
			       CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	if (sha->final) {
		/* Finishing up, so get the digest */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			ccp_get_dm_area(&ctx, ooffset,
					sha->ctx, 0,
					digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			ccp_get_dm_area(&ctx, 0,
					sha->ctx, LSB_ITEM_SIZE - ooffset,
					LSB_ITEM_SIZE);
			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
					sha->ctx, 0,
					LSB_ITEM_SIZE - ooffset);
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}
	} else {
		/* Stash the context */
		ccp_get_dm_area(&ctx, 0, sha->ctx, 0,
				sb_count * CCP_SB_BYTES);
	}

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u8 *hmac_buf;

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
		case CCP_SHA_TYPE_224:
		case CCP_SHA_TYPE_256:
			memcpy(hmac_buf + block_size,
			       ctx.address + ooffset,
			       digest_size);
			break;
		case CCP_SHA_TYPE_384:
		case CCP_SHA_TYPE_512:
			memcpy(hmac_buf + block_size,
			       ctx.address + LSB_ITEM_SIZE + ooffset,
			       LSB_ITEM_SIZE);
			memcpy(hmac_buf + block_size +
			       (LSB_ITEM_SIZE - ooffset),
			       ctx.address,
			       LSB_ITEM_SIZE);
			break;
		default:
			kfree(hmac_buf);
			ret = -EINVAL;
			goto e_data;
		}

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	if (sha->src)
		ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
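/*
 * HMAC sketch (illustrative, not from the original source): for the final
 * HMAC step the driver builds hmac_buf = opad || H(ipad || message) and
 * recursively hashes that single block_size + digest_size buffer, i.e.
 * HMAC(K, m) = H(opad || H(ipad || m)), reusing this same function for the
 * outer hash via a second, self-contained ccp_cmd.
 */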
static noinline_for_stack int
ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src, dst;
	struct ccp_op op;
	unsigned int sb_count, i_len, o_len;
	int ret;

	/* Check against the maximum allowable size, in bits */
	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits). Compute o_len, i_len in bytes.
	 * Buffer sizes must be a multiple of 32 bytes; rounding up may be
	 * required.
	 */
	o_len = 32 * ((rsa->key_size + 255) / 256);
	i_len = o_len * 2;

	sb_count = 0;
	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* sb_count is the number of storage block slots required
		 * for the modulus.
		 */
		sb_count = o_len / CCP_SB_BYTES;
		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
								sb_count);
		if (!op.sb_key)
			return -EIO;
	} else {
		/* A version 5 device allows a modulus size that will not fit
		 * in the LSB, so the command will transfer it from memory.
		 * Set the sb key to the default, even though it's not used.
		 */
		op.sb_key = cmd_q->sb_key;
	}

	/* The RSA exponent must be in little endian format. Reverse its
	 * byte order.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_sb;

	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
	if (ret)
		goto e_exp;

	if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) {
		/* Copy the exponent to the local storage block, using
		 * as many 32-byte blocks as were allocated above. It's
		 * already little endian, so no further change is required.
		 */
		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_exp;
		}
	} else {
		/* The exponent can be retrieved from memory via DMA. */
		op.exp.u.dma.address = exp.dma.address;
		op.exp.u.dma.offset = 0;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
	if (ret)
		goto e_src;
	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
	if (ret)
		goto e_src;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = cmd_q->ccp->vdata->perform->rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_sb:
	if (sb_count)
		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);

	return ret;
}
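/*
 * Sizing example (illustrative, not from the original source): for a
 * 2048-bit key,
 *
 *	o_len = 32 * ((2048 + 255) / 256) = 256 bytes
 *	i_len = 512 bytes = [ modulus (256) | message (256) ]
 *
 * so the source buffer always carries the little-endian modulus ahead of
 * the operand, and the result occupies one o_len-sized output buffer.
 */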
static noinline_for_stack int
ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret = 0;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_SB_COUNT *
					   CCP_SB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		if (ret)
			goto e_mask;
		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, each entry in the source scatterlist
	 *   (after the dma_map_sg call) must be less than or equal to the
	 *   (remaining) length in the destination scatterlist entry and the
	 *   length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = cmd_q->ccp->vdata->perform->passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
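/*
 * Constraint example (illustrative, not from the original source): a
 * source scatterlist with mapped entries of 64 and 32 bytes can feed a
 * destination list with entries of 64 and 64 bytes, but not the reverse;
 * each mapped source entry becomes one engine operation and must fit in
 * the space left in the current destination entry, in multiples of
 * CCP_PASSTHRU_BLOCKSIZE.
 */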
static noinline_for_stack int
ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
			   struct ccp_cmd *cmd)
{
	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
	struct ccp_dm_workarea mask;
	struct ccp_op op;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src_dma || !pt->dst_dma)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.sb_key = cmd_q->sb_key;

		mask.length = pt->mask_len;
		mask.dma.address = pt->mask;
		mask.dma.length = pt->mask_len;

		ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
				     CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			return ret;
		}
	}

	/* Send data to the CCP Passthru engine */
	op.eom = 1;
	op.soc = 1;

	op.src.type = CCP_MEMTYPE_SYSTEM;
	op.src.u.dma.address = pt->src_dma;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = pt->src_len;

	op.dst.type = CCP_MEMTYPE_SYSTEM;
	op.dst.u.dma.address = pt->dst_dma;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = pt->src_len;

	ret = cmd_q->ccp->vdata->perform->passthru(&op);
	if (ret)
		cmd->engine_error = cmd_q->cmd_error;

	return ret;
}
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
				      ecc->u.mm.operand_1_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
					      ecc->u.mm.operand_2_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
				CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
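/*
 * Buffer layout illustration (not from the original source): the source
 * work area for a modular multiply is a sequence of fixed-size
 * CCP_ECC_OPERAND_SIZE fields,
 *
 *	[ modulus | operand_1 | operand_2 ]
 *
 * each byte-reversed into little endian and zero-padded to the field
 * size; modular inversion simply omits the second operand.
 */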
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	}

	if (!ecc->u.pm.domain_a ||
	    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
		if (!ecc->u.pm.scalar ||
		    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
				      ecc->u.pm.point_1.x_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;
	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
				      ecc->u.pm.point_1.y_len);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
					      ecc->u.pm.point_2.x_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
					      ecc->u.pm.point_2.y_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
					      ecc->u.pm.domain_a_len);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, 0,
						      ecc->u.pm.scalar, 0,
						      ecc->u.pm.scalar_len);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = cmd_q->ccp->vdata->perform->ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static noinline_for_stack int
ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q);

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		switch (cmd->u.aes.mode) {
		case CCP_AES_MODE_CMAC:
			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
			break;
		case CCP_AES_MODE_GCM:
			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
			break;
		default:
			ret = ccp_run_aes_cmd(cmd_q, cmd);
			break;
		}
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_DES3:
		ret = ccp_run_des3_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
			ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
		else
			ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}