// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
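
/*
 * Registration priority: set above the generic software implementations so
 * that the CAAM-accelerated algorithms are preferred when available.
 */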
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
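/*
 * (The split key is an HMAC ipad/opad pair, so its maximum size is twice the
 * largest supported digest size, i.e. that of SHA-512.)
 */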

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;

struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
};

static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;
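
	/*
	 * desc_inline_query() reports which keys still fit inline in the
	 * shared descriptor: bit i of inl_mask corresponds to data_len[i],
	 * i.e. bit 0 - authentication key, bit 1 - encryption key.
	 */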
	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;
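
	/*
	 * ctx->key layout: the split authentication key, padded to
	 * adata.keylen_pad bytes, followed by the encryption key.
	 */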
	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}

static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;
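
	/*
	 * Note: per the DPAA2 frame list convention used throughout this
	 * driver, fd_flt[0] is the output frame list entry and fd_flt[1]
	 * the input one.
	 */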

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G)
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;
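
	/*
	 * Frame lengths: the input covers the 4-byte assoclen word, the IV
	 * (if any), the associated data and the payload; the output grows
	 * (encrypt) or shrinks (decrypt) by the size of the ICV.
	 */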
	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);
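
	/*
	 * Output frame list entry: for in-place operation reuse the input
	 * S/G table, skipping the assoclen and IV entries; otherwise point
	 * at the destination, either as a single buffer or as the S/G
	 * entries starting at qm_sg_index.
	 */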
	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret) {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return err;
	}

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE) {
		crypto_skcipher_set_flags(skcipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;
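
	/*
	 * XTS uses two AES keys of equal size, so the supplied key blob must
	 * be exactly twice the minimum or maximum AES key length.
	 */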
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		dev_err(dev, "key size mismatch\n");
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);
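
	/*
	 * The IV buffer is mapped bidirectionally: it is referenced by both
	 * the input S/G table (IV in) and the output one, where the engine
	 * writes back the IV that is later propagated to req->iv.
	 */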
	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
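
	/*
	 * In-place: the output S/G starts right after the input IV entry,
	 * reusing the source entries; otherwise it starts at dst_sg_idx.
	 */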
	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
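
	/*
	 * -EINPROGRESS (or -EBUSY for a backloggable request) means the
	 * request was accepted and will complete via the callback; any other
	 * return value is an error, so release the resources here.
	 */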
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
	       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
	       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	int ret;

	if (!req->cryptlen)
		return 0;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
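
	/*
	 * A single DMA mapping covers the Flow Contexts and the key material
	 * (everything in struct caam_ctx up to the flc_dma member); the
	 * per-object DMA addresses are derived from it below.
	 */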
	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}

static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	caam_exit_common(crypto_skcipher_ctx(tfm));
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chacha20_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};

static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2385 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2386 OP_ALG_AAI_HMAC_PRECOMP,
2392 .cra_name = "echainiv(authenc(hmac(sha1),"
2394 .cra_driver_name = "echainiv-authenc-"
2395 "hmac-sha1-cbc-des-caam-qi2",
2396 .cra_blocksize = DES_BLOCK_SIZE,
2398 .setkey = aead_setkey,
2399 .setauthsize = aead_setauthsize,
2400 .encrypt = aead_encrypt,
2401 .decrypt = aead_decrypt,
2402 .ivsize = DES_BLOCK_SIZE,
2403 .maxauthsize = SHA1_DIGEST_SIZE,
2406 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2407 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2408 OP_ALG_AAI_HMAC_PRECOMP,
2415 .cra_name = "authenc(hmac(sha224),cbc(des))",
2416 .cra_driver_name = "authenc-hmac-sha224-"
2418 .cra_blocksize = DES_BLOCK_SIZE,
2420 .setkey = aead_setkey,
2421 .setauthsize = aead_setauthsize,
2422 .encrypt = aead_encrypt,
2423 .decrypt = aead_decrypt,
2424 .ivsize = DES_BLOCK_SIZE,
2425 .maxauthsize = SHA224_DIGEST_SIZE,
2428 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2429 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2430 OP_ALG_AAI_HMAC_PRECOMP,
2436 .cra_name = "echainiv(authenc(hmac(sha224),"
2438 .cra_driver_name = "echainiv-authenc-"
2439 "hmac-sha224-cbc-des-"
2441 .cra_blocksize = DES_BLOCK_SIZE,
2443 .setkey = aead_setkey,
2444 .setauthsize = aead_setauthsize,
2445 .encrypt = aead_encrypt,
2446 .decrypt = aead_decrypt,
2447 .ivsize = DES_BLOCK_SIZE,
2448 .maxauthsize = SHA224_DIGEST_SIZE,
2451 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2452 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2453 OP_ALG_AAI_HMAC_PRECOMP,
2460 .cra_name = "authenc(hmac(sha256),cbc(des))",
2461 .cra_driver_name = "authenc-hmac-sha256-"
2463 .cra_blocksize = DES_BLOCK_SIZE,
2465 .setkey = aead_setkey,
2466 .setauthsize = aead_setauthsize,
2467 .encrypt = aead_encrypt,
2468 .decrypt = aead_decrypt,
2469 .ivsize = DES_BLOCK_SIZE,
2470 .maxauthsize = SHA256_DIGEST_SIZE,
2473 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2474 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2475 OP_ALG_AAI_HMAC_PRECOMP,
2481 .cra_name = "echainiv(authenc(hmac(sha256),"
2483 .cra_driver_name = "echainiv-authenc-"
2484 "hmac-sha256-cbc-desi-"
2486 .cra_blocksize = DES_BLOCK_SIZE,
2488 .setkey = aead_setkey,
2489 .setauthsize = aead_setauthsize,
2490 .encrypt = aead_encrypt,
2491 .decrypt = aead_decrypt,
2492 .ivsize = DES_BLOCK_SIZE,
2493 .maxauthsize = SHA256_DIGEST_SIZE,
2496 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2497 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2498 OP_ALG_AAI_HMAC_PRECOMP,
2505 .cra_name = "authenc(hmac(sha384),cbc(des))",
2506 .cra_driver_name = "authenc-hmac-sha384-"
2508 .cra_blocksize = DES_BLOCK_SIZE,
2510 .setkey = aead_setkey,
2511 .setauthsize = aead_setauthsize,
2512 .encrypt = aead_encrypt,
2513 .decrypt = aead_decrypt,
2514 .ivsize = DES_BLOCK_SIZE,
2515 .maxauthsize = SHA384_DIGEST_SIZE,
2518 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2519 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2520 OP_ALG_AAI_HMAC_PRECOMP,
2526 .cra_name = "echainiv(authenc(hmac(sha384),"
2528 .cra_driver_name = "echainiv-authenc-"
2529 "hmac-sha384-cbc-des-"
2531 .cra_blocksize = DES_BLOCK_SIZE,
2533 .setkey = aead_setkey,
2534 .setauthsize = aead_setauthsize,
2535 .encrypt = aead_encrypt,
2536 .decrypt = aead_decrypt,
2537 .ivsize = DES_BLOCK_SIZE,
2538 .maxauthsize = SHA384_DIGEST_SIZE,
2541 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2542 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2543 OP_ALG_AAI_HMAC_PRECOMP,
2550 .cra_name = "authenc(hmac(sha512),cbc(des))",
2551 .cra_driver_name = "authenc-hmac-sha512-"
2553 .cra_blocksize = DES_BLOCK_SIZE,
2555 .setkey = aead_setkey,
2556 .setauthsize = aead_setauthsize,
2557 .encrypt = aead_encrypt,
2558 .decrypt = aead_decrypt,
2559 .ivsize = DES_BLOCK_SIZE,
2560 .maxauthsize = SHA512_DIGEST_SIZE,
2563 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2564 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2565 OP_ALG_AAI_HMAC_PRECOMP,
2571 .cra_name = "echainiv(authenc(hmac(sha512),"
2573 .cra_driver_name = "echainiv-authenc-"
2574 "hmac-sha512-cbc-des-"
2576 .cra_blocksize = DES_BLOCK_SIZE,
2578 .setkey = aead_setkey,
2579 .setauthsize = aead_setauthsize,
2580 .encrypt = aead_encrypt,
2581 .decrypt = aead_decrypt,
2582 .ivsize = DES_BLOCK_SIZE,
2583 .maxauthsize = SHA512_DIGEST_SIZE,
2586 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2587 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2588 OP_ALG_AAI_HMAC_PRECOMP,
2595 .cra_name = "authenc(hmac(md5),"
2596 "rfc3686(ctr(aes)))",
2597 .cra_driver_name = "authenc-hmac-md5-"
2598 "rfc3686-ctr-aes-caam-qi2",
2601 .setkey = aead_setkey,
2602 .setauthsize = aead_setauthsize,
2603 .encrypt = aead_encrypt,
2604 .decrypt = aead_decrypt,
2605 .ivsize = CTR_RFC3686_IV_SIZE,
2606 .maxauthsize = MD5_DIGEST_SIZE,
2609 .class1_alg_type = OP_ALG_ALGSEL_AES |
2610 OP_ALG_AAI_CTR_MOD128,
2611 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2612 OP_ALG_AAI_HMAC_PRECOMP,
2619 .cra_name = "seqiv(authenc("
2620 "hmac(md5),rfc3686(ctr(aes))))",
2621 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2622 "rfc3686-ctr-aes-caam-qi2",
2625 .setkey = aead_setkey,
2626 .setauthsize = aead_setauthsize,
2627 .encrypt = aead_encrypt,
2628 .decrypt = aead_decrypt,
2629 .ivsize = CTR_RFC3686_IV_SIZE,
2630 .maxauthsize = MD5_DIGEST_SIZE,
2633 .class1_alg_type = OP_ALG_ALGSEL_AES |
2634 OP_ALG_AAI_CTR_MOD128,
2635 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2636 OP_ALG_AAI_HMAC_PRECOMP,
2644 .cra_name = "authenc(hmac(sha1),"
2645 "rfc3686(ctr(aes)))",
2646 .cra_driver_name = "authenc-hmac-sha1-"
2647 "rfc3686-ctr-aes-caam-qi2",
2650 .setkey = aead_setkey,
2651 .setauthsize = aead_setauthsize,
2652 .encrypt = aead_encrypt,
2653 .decrypt = aead_decrypt,
2654 .ivsize = CTR_RFC3686_IV_SIZE,
2655 .maxauthsize = SHA1_DIGEST_SIZE,
2658 .class1_alg_type = OP_ALG_ALGSEL_AES |
2659 OP_ALG_AAI_CTR_MOD128,
2660 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2661 OP_ALG_AAI_HMAC_PRECOMP,
2668 .cra_name = "seqiv(authenc("
2669 "hmac(sha1),rfc3686(ctr(aes))))",
2670 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2671 "rfc3686-ctr-aes-caam-qi2",
2674 .setkey = aead_setkey,
2675 .setauthsize = aead_setauthsize,
2676 .encrypt = aead_encrypt,
2677 .decrypt = aead_decrypt,
2678 .ivsize = CTR_RFC3686_IV_SIZE,
2679 .maxauthsize = SHA1_DIGEST_SIZE,
2682 .class1_alg_type = OP_ALG_ALGSEL_AES |
2683 OP_ALG_AAI_CTR_MOD128,
2684 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2685 OP_ALG_AAI_HMAC_PRECOMP,
2693 .cra_name = "authenc(hmac(sha224),"
2694 "rfc3686(ctr(aes)))",
2695 .cra_driver_name = "authenc-hmac-sha224-"
2696 "rfc3686-ctr-aes-caam-qi2",
2699 .setkey = aead_setkey,
2700 .setauthsize = aead_setauthsize,
2701 .encrypt = aead_encrypt,
2702 .decrypt = aead_decrypt,
2703 .ivsize = CTR_RFC3686_IV_SIZE,
2704 .maxauthsize = SHA224_DIGEST_SIZE,
2707 .class1_alg_type = OP_ALG_ALGSEL_AES |
2708 OP_ALG_AAI_CTR_MOD128,
2709 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2710 OP_ALG_AAI_HMAC_PRECOMP,
2717 .cra_name = "seqiv(authenc("
2718 "hmac(sha224),rfc3686(ctr(aes))))",
2719 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2720 "rfc3686-ctr-aes-caam-qi2",
2723 .setkey = aead_setkey,
2724 .setauthsize = aead_setauthsize,
2725 .encrypt = aead_encrypt,
2726 .decrypt = aead_decrypt,
2727 .ivsize = CTR_RFC3686_IV_SIZE,
2728 .maxauthsize = SHA224_DIGEST_SIZE,
2731 .class1_alg_type = OP_ALG_ALGSEL_AES |
2732 OP_ALG_AAI_CTR_MOD128,
2733 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2734 OP_ALG_AAI_HMAC_PRECOMP,
2742 .cra_name = "authenc(hmac(sha256),"
2743 "rfc3686(ctr(aes)))",
2744 .cra_driver_name = "authenc-hmac-sha256-"
2745 "rfc3686-ctr-aes-caam-qi2",
2748 .setkey = aead_setkey,
2749 .setauthsize = aead_setauthsize,
2750 .encrypt = aead_encrypt,
2751 .decrypt = aead_decrypt,
2752 .ivsize = CTR_RFC3686_IV_SIZE,
2753 .maxauthsize = SHA256_DIGEST_SIZE,
2756 .class1_alg_type = OP_ALG_ALGSEL_AES |
2757 OP_ALG_AAI_CTR_MOD128,
2758 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2759 OP_ALG_AAI_HMAC_PRECOMP,
2766 .cra_name = "seqiv(authenc(hmac(sha256),"
2767 "rfc3686(ctr(aes))))",
2768 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2769 "rfc3686-ctr-aes-caam-qi2",
2772 .setkey = aead_setkey,
2773 .setauthsize = aead_setauthsize,
2774 .encrypt = aead_encrypt,
2775 .decrypt = aead_decrypt,
2776 .ivsize = CTR_RFC3686_IV_SIZE,
2777 .maxauthsize = SHA256_DIGEST_SIZE,
2780 .class1_alg_type = OP_ALG_ALGSEL_AES |
2781 OP_ALG_AAI_CTR_MOD128,
2782 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2783 OP_ALG_AAI_HMAC_PRECOMP,
2791 .cra_name = "authenc(hmac(sha384),"
2792 "rfc3686(ctr(aes)))",
2793 .cra_driver_name = "authenc-hmac-sha384-"
2794 "rfc3686-ctr-aes-caam-qi2",
2797 .setkey = aead_setkey,
2798 .setauthsize = aead_setauthsize,
2799 .encrypt = aead_encrypt,
2800 .decrypt = aead_decrypt,
2801 .ivsize = CTR_RFC3686_IV_SIZE,
2802 .maxauthsize = SHA384_DIGEST_SIZE,
2805 .class1_alg_type = OP_ALG_ALGSEL_AES |
2806 OP_ALG_AAI_CTR_MOD128,
2807 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2808 OP_ALG_AAI_HMAC_PRECOMP,
2815 .cra_name = "seqiv(authenc(hmac(sha384),"
2816 "rfc3686(ctr(aes))))",
2817 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2818 "rfc3686-ctr-aes-caam-qi2",
2821 .setkey = aead_setkey,
2822 .setauthsize = aead_setauthsize,
2823 .encrypt = aead_encrypt,
2824 .decrypt = aead_decrypt,
2825 .ivsize = CTR_RFC3686_IV_SIZE,
2826 .maxauthsize = SHA384_DIGEST_SIZE,
2829 .class1_alg_type = OP_ALG_ALGSEL_AES |
2830 OP_ALG_AAI_CTR_MOD128,
2831 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2832 OP_ALG_AAI_HMAC_PRECOMP,
2840 .cra_name = "rfc7539(chacha20,poly1305)",
2841 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2845 .setkey = chachapoly_setkey,
2846 .setauthsize = chachapoly_setauthsize,
2847 .encrypt = aead_encrypt,
2848 .decrypt = aead_decrypt,
2849 .ivsize = CHACHAPOLY_IV_SIZE,
2850 .maxauthsize = POLY1305_DIGEST_SIZE,
2853 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2855 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2863 .cra_name = "rfc7539esp(chacha20,poly1305)",
2864 .cra_driver_name = "rfc7539esp-chacha20-"
2865 "poly1305-caam-qi2",
2868 .setkey = chachapoly_setkey,
2869 .setauthsize = chachapoly_setauthsize,
2870 .encrypt = aead_encrypt,
2871 .decrypt = aead_decrypt,
2873 .maxauthsize = POLY1305_DIGEST_SIZE,
2876 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2878 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2886 .cra_name = "authenc(hmac(sha512),"
2887 "rfc3686(ctr(aes)))",
2888 .cra_driver_name = "authenc-hmac-sha512-"
2889 "rfc3686-ctr-aes-caam-qi2",
2892 .setkey = aead_setkey,
2893 .setauthsize = aead_setauthsize,
2894 .encrypt = aead_encrypt,
2895 .decrypt = aead_decrypt,
2896 .ivsize = CTR_RFC3686_IV_SIZE,
2897 .maxauthsize = SHA512_DIGEST_SIZE,
2900 .class1_alg_type = OP_ALG_ALGSEL_AES |
2901 OP_ALG_AAI_CTR_MOD128,
2902 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2903 OP_ALG_AAI_HMAC_PRECOMP,
2910 .cra_name = "seqiv(authenc(hmac(sha512),"
2911 "rfc3686(ctr(aes))))",
2912 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2913 "rfc3686-ctr-aes-caam-qi2",
2916 .setkey = aead_setkey,
2917 .setauthsize = aead_setauthsize,
2918 .encrypt = aead_encrypt,
2919 .decrypt = aead_decrypt,
2920 .ivsize = CTR_RFC3686_IV_SIZE,
2921 .maxauthsize = SHA512_DIGEST_SIZE,
2924 .class1_alg_type = OP_ALG_ALGSEL_AES |
2925 OP_ALG_AAI_CTR_MOD128,
2926 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2927 OP_ALG_AAI_HMAC_PRECOMP,
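/*
 * The template arrays above spell out only the per-algorithm fields; the
 * common crypto_alg boilerplate (module owner, priority, context size,
 * flags) is filled in at registration time by the two helpers below.
 */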
2934 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
2936 struct skcipher_alg *alg = &t_alg->skcipher;
2938 alg->base.cra_module = THIS_MODULE;
2939 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2940 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2941 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2943 alg->init = caam_cra_init_skcipher;
2944 alg->exit = caam_cra_exit;
2947 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
2949 struct aead_alg *alg = &t_alg->aead;
2951 alg->base.cra_module = THIS_MODULE;
2952 alg->base.cra_priority = CAAM_CRA_PRIORITY;
2953 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
2954 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2956 alg->init = caam_cra_init_aead;
2957 alg->exit = caam_cra_exit_aead;
2960 /* max hash key is max split key size */
2961 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
2963 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
2965 /* caam context sizes for hashes: running digest + 8-byte message length */
2966 #define HASH_MSG_LEN 8
2967 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
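/*
 * Worked example: for SHA-256 the state kept in the Context Register is
 * HASH_MSG_LEN + SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes; MAX_CTX_LEN
 * (8 + 64 = 72 bytes) covers the largest case, SHA-512.
 */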
2978 * caam_hash_ctx - ahash per-session context
2979 * @flc: Flow Contexts array
2980 * @key: authentication key
2981 * @flc_dma: I/O virtual addresses of the Flow Contexts
2982 * @dev: dpseci device
2983 * @ctx_len: size of Context Register
2984 * @adata: hashing algorithm details
2986 struct caam_hash_ctx {
2987 struct caam_flc flc[HASH_NUM_OP];
2988 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
2989 dma_addr_t flc_dma[HASH_NUM_OP];
2992 struct alginfo adata;
2996 struct caam_hash_state {
2997 struct caam_request caam_req;
3001 u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3003 u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3005 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3006 int (*update)(struct ahash_request *req);
3007 int (*final)(struct ahash_request *req);
3008 int (*finup)(struct ahash_request *req);
3012 struct caam_export_state {
3013 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3014 u8 caam_ctx[MAX_CTX_LEN];
3016 int (*update)(struct ahash_request *req);
3017 int (*final)(struct ahash_request *req);
3018 int (*finup)(struct ahash_request *req);
3021 static inline void switch_buf(struct caam_hash_state *state)
3023 state->current_buf ^= 1;
3026 static inline u8 *current_buf(struct caam_hash_state *state)
3028 return state->current_buf ? state->buf_1 : state->buf_0;
3031 static inline u8 *alt_buf(struct caam_hash_state *state)
3033 return state->current_buf ? state->buf_0 : state->buf_1;
3036 static inline int *current_buflen(struct caam_hash_state *state)
3038 return state->current_buf ? &state->buflen_1 : &state->buflen_0;
3041 static inline int *alt_buflen(struct caam_hash_state *state)
3043 return state->current_buf ? &state->buflen_0 : &state->buflen_1;
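/*
 * The helpers above implement a ping-pong scheme over the two buffers in
 * caam_hash_state: current_buf()/current_buflen() hold bytes carried over
 * from previous requests that still await hashing, while alt_buf()/
 * alt_buflen() collect the trailing partial block of the request in
 * flight; switch_buf() swaps the two roles once a job has been submitted.
 */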
3046 /* Map the current buffer in state (if length > 0) and put it in the link table */
3047 static inline int buf_map_to_qm_sg(struct device *dev,
3048 struct dpaa2_sg_entry *qm_sg,
3049 struct caam_hash_state *state)
3051 int buflen = *current_buflen(state);
3056 state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
3058 if (dma_mapping_error(dev, state->buf_dma)) {
3059 dev_err(dev, "unable to map buf\n");
3064 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3069 /* Map state->caam_ctx and add it to the link table */
3070 static inline int ctx_map_to_qm_sg(struct device *dev,
3071 struct caam_hash_state *state, int ctx_len,
3072 struct dpaa2_sg_entry *qm_sg, u32 flag)
3074 state->ctx_dma_len = ctx_len;
3075 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3076 if (dma_mapping_error(dev, state->ctx_dma)) {
3077 dev_err(dev, "unable to map ctx\n");
3082 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3087 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3089 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3090 int digestsize = crypto_ahash_digestsize(ahash);
3091 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3092 struct caam_flc *flc;
3095 /* ahash_update shared descriptor */
3096 flc = &ctx->flc[UPDATE];
3097 desc = flc->sh_desc;
3098 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3099 ctx->ctx_len, true, priv->sec_attr.era);
3100 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3101 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3102 desc_bytes(desc), DMA_BIDIRECTIONAL);
3103 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3104 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3107 /* ahash_update_first shared descriptor */
3108 flc = &ctx->flc[UPDATE_FIRST];
3109 desc = flc->sh_desc;
3110 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3111 ctx->ctx_len, false, priv->sec_attr.era);
3112 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3113 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3114 desc_bytes(desc), DMA_BIDIRECTIONAL);
3115 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3116 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3119 /* ahash_final shared descriptor */
3120 flc = &ctx->flc[FINALIZE];
3121 desc = flc->sh_desc;
3122 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3123 ctx->ctx_len, true, priv->sec_attr.era);
3124 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3125 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3126 desc_bytes(desc), DMA_BIDIRECTIONAL);
3127 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3128 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3131 /* ahash_digest shared descriptor */
3132 flc = &ctx->flc[DIGEST];
3133 desc = flc->sh_desc;
3134 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3135 ctx->ctx_len, false, priv->sec_attr.era);
3136 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3137 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3138 desc_bytes(desc), DMA_BIDIRECTIONAL);
3139 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3140 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
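/*
 * Summary of the four shared descriptors built above:
 * - UPDATE:       OP_ALG_AS_UPDATE,    context in and out (middle chunks)
 * - UPDATE_FIRST: OP_ALG_AS_INIT,      no context in, context out
 * - FINALIZE:     OP_ALG_AS_FINALIZE,  context in, final digest out
 * - DIGEST:       OP_ALG_AS_INITFINAL, one-shot hash of the whole input
 */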
3146 struct split_key_sh_result {
3147 struct completion completion;
3152 static void split_key_sh_done(void *cbk_ctx, u32 err)
3154 struct split_key_sh_result *res = cbk_ctx;
3156 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3158 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3159 complete(&res->completion);
3162 /* Digest the key when it is too long to be used directly */
3163 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3166 struct caam_request *req_ctx;
3168 struct split_key_sh_result result;
3170 struct caam_flc *flc;
3173 struct dpaa2_fl_entry *in_fle, *out_fle;
3175 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3179 in_fle = &req_ctx->fd_flt[1];
3180 out_fle = &req_ctx->fd_flt[0];
3182 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3186 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3187 if (dma_mapping_error(ctx->dev, key_dma)) {
3188 dev_err(ctx->dev, "unable to map key memory\n");
3192 desc = flc->sh_desc;
3194 init_sh_desc(desc, 0);
3196 /* descriptor to perform unkeyed hash on key_in */
3197 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3198 OP_ALG_AS_INITFINAL);
3199 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3200 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3201 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3202 LDST_SRCDST_BYTE_CONTEXT);
3204 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3205 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3206 desc_bytes(desc), DMA_TO_DEVICE);
3207 if (dma_mapping_error(ctx->dev, flc_dma)) {
3208 dev_err(ctx->dev, "unable to map shared descriptor\n");
3212 dpaa2_fl_set_final(in_fle, true);
3213 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3214 dpaa2_fl_set_addr(in_fle, key_dma);
3215 dpaa2_fl_set_len(in_fle, *keylen);
3216 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3217 dpaa2_fl_set_addr(out_fle, key_dma);
3218 dpaa2_fl_set_len(out_fle, digestsize);
3220 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3221 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3222 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3223 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3227 init_completion(&result.completion);
3228 result.dev = ctx->dev;
3231 req_ctx->flc_dma = flc_dma;
3232 req_ctx->cbk = split_key_sh_done;
3233 req_ctx->ctx = &result;
3235 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3236 if (ret == -EINPROGRESS) {
3238 wait_for_completion(&result.completion);
3240 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3241 DUMP_PREFIX_ADDRESS, 16, 4, key,
3245 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3248 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3254 *keylen = digestsize;
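/*
 * Note that hash_digest_key() runs synchronously: the job is enqueued to
 * the DPSECI object and the caller sleeps on a completion until
 * split_key_sh_done() fires. The key buffer doubles as input and output
 * frame, which is why *keylen is updated to digestsize on success.
 */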
3259 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3260 unsigned int keylen)
3262 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3263 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3264 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3266 u8 *hashed_key = NULL;
3268 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
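/*
 * Per RFC 2104, an HMAC key longer than the underlying hash's block
 * size is first hashed; the resulting digest is then used as the key.
 */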
3270 if (keylen > blocksize) {
3271 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3274 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3280 ctx->adata.keylen = keylen;
3281 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3282 OP_ALG_ALGSEL_MASK);
3283 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3286 ctx->adata.key_virt = key;
3287 ctx->adata.key_inline = true;
3290 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3291 * in invalid opcodes (last bytes of user key) in the resulting
3292 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3293 * addresses are needed.
3295 if (keylen > ctx->adata.keylen_pad) {
3296 memcpy(ctx->key, key, keylen);
3297 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3298 ctx->adata.keylen_pad,
3302 ret = ahash_set_sh_desc(ahash);
3307 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
3311 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3312 struct ahash_request *req)
3314 struct caam_hash_state *state = ahash_request_ctx(req);
3316 if (edesc->src_nents)
3317 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3319 if (edesc->qm_sg_bytes)
3320 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3323 if (state->buf_dma) {
3324 dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
3330 static inline void ahash_unmap_ctx(struct device *dev,
3331 struct ahash_edesc *edesc,
3332 struct ahash_request *req, u32 flag)
3334 struct caam_hash_state *state = ahash_request_ctx(req);
3336 if (state->ctx_dma) {
3337 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3340 ahash_unmap(dev, edesc, req);
3343 static void ahash_done(void *cbk_ctx, u32 status)
3345 struct crypto_async_request *areq = cbk_ctx;
3346 struct ahash_request *req = ahash_request_cast(areq);
3347 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3348 struct caam_hash_state *state = ahash_request_ctx(req);
3349 struct ahash_edesc *edesc = state->caam_req.edesc;
3350 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3351 int digestsize = crypto_ahash_digestsize(ahash);
3354 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3356 if (unlikely(status))
3357 ecode = caam_qi2_strstatus(ctx->dev, status);
3359 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3360 memcpy(req->result, state->caam_ctx, digestsize);
3361 qi_cache_free(edesc);
3363 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3364 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3367 req->base.complete(&req->base, ecode);
3370 static void ahash_done_bi(void *cbk_ctx, u32 status)
3372 struct crypto_async_request *areq = cbk_ctx;
3373 struct ahash_request *req = ahash_request_cast(areq);
3374 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3375 struct caam_hash_state *state = ahash_request_ctx(req);
3376 struct ahash_edesc *edesc = state->caam_req.edesc;
3377 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3380 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3382 if (unlikely(status))
3383 ecode = caam_qi2_strstatus(ctx->dev, status);
3385 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3387 qi_cache_free(edesc);
3389 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3390 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3393 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3394 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3395 crypto_ahash_digestsize(ahash), 1);
3397 req->base.complete(&req->base, ecode);
3400 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3402 struct crypto_async_request *areq = cbk_ctx;
3403 struct ahash_request *req = ahash_request_cast(areq);
3404 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3405 struct caam_hash_state *state = ahash_request_ctx(req);
3406 struct ahash_edesc *edesc = state->caam_req.edesc;
3407 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3408 int digestsize = crypto_ahash_digestsize(ahash);
3411 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3413 if (unlikely(status))
3414 ecode = caam_qi2_strstatus(ctx->dev, status);
3416 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3417 memcpy(req->result, state->caam_ctx, digestsize);
3418 qi_cache_free(edesc);
3420 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3421 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3424 req->base.complete(&req->base, ecode);
3427 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3429 struct crypto_async_request *areq = cbk_ctx;
3430 struct ahash_request *req = ahash_request_cast(areq);
3431 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3432 struct caam_hash_state *state = ahash_request_ctx(req);
3433 struct ahash_edesc *edesc = state->caam_req.edesc;
3434 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3437 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3439 if (unlikely(status))
3440 ecode = caam_qi2_strstatus(ctx->dev, status);
3442 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3444 qi_cache_free(edesc);
3446 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3447 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3450 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3451 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3452 crypto_ahash_digestsize(ahash), 1);
3454 req->base.complete(&req->base, ecode);
3457 static int ahash_update_ctx(struct ahash_request *req)
3459 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3460 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3461 struct caam_hash_state *state = ahash_request_ctx(req);
3462 struct caam_request *req_ctx = &state->caam_req;
3463 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3464 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3465 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3466 GFP_KERNEL : GFP_ATOMIC;
3467 u8 *buf = current_buf(state);
3468 int *buflen = current_buflen(state);
3469 u8 *next_buf = alt_buf(state);
3470 int *next_buflen = alt_buflen(state), last_buflen;
3471 int in_len = *buflen + req->nbytes, to_hash;
3472 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3473 struct ahash_edesc *edesc;
3476 last_buflen = *next_buflen;
3477 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3478 to_hash = in_len - *next_buflen;
3481 struct dpaa2_sg_entry *sg_table;
3482 int src_len = req->nbytes - *next_buflen;
3484 src_nents = sg_nents_for_len(req->src, src_len);
3485 if (src_nents < 0) {
3486 dev_err(ctx->dev, "Invalid number of src SG.\n");
3491 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3493 if (!mapped_nents) {
3494 dev_err(ctx->dev, "unable to DMA map source\n");
3501 /* allocate space for base edesc and link tables */
3502 edesc = qi_cache_zalloc(GFP_DMA | flags);
3504 dma_unmap_sg(ctx->dev, req->src, src_nents,
3509 edesc->src_nents = src_nents;
3510 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3511 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3513 sg_table = &edesc->sgt[0];
3515 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3520 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3525 sg_to_qm_sg_last(req->src, src_len,
3526 sg_table + qm_sg_src_index, 0);
3528 scatterwalk_map_and_copy(next_buf, req->src,
3532 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3536 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3537 qm_sg_bytes, DMA_TO_DEVICE);
3538 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3539 dev_err(ctx->dev, "unable to map S/G table\n");
3543 edesc->qm_sg_bytes = qm_sg_bytes;
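/*
 * The input frame list entry points at the S/G table built above (running
 * context || buffered bytes || new request data); the output entry
 * receives the updated running context in place.
 */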
3545 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3546 dpaa2_fl_set_final(in_fle, true);
3547 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3548 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3549 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3550 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3551 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3552 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3554 req_ctx->flc = &ctx->flc[UPDATE];
3555 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3556 req_ctx->cbk = ahash_done_bi;
3557 req_ctx->ctx = &req->base;
3558 req_ctx->edesc = edesc;
3560 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3561 if (ret != -EINPROGRESS &&
3563 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3565 } else if (*next_buflen) {
3566 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3568 *buflen = *next_buflen;
3569 *next_buflen = last_buflen;
3572 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3573 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
3574 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
3575 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
3580 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3581 qi_cache_free(edesc);
3585 static int ahash_final_ctx(struct ahash_request *req)
3587 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3588 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3589 struct caam_hash_state *state = ahash_request_ctx(req);
3590 struct caam_request *req_ctx = &state->caam_req;
3591 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3592 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3593 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3594 GFP_KERNEL : GFP_ATOMIC;
3595 int buflen = *current_buflen(state);
3597 int digestsize = crypto_ahash_digestsize(ahash);
3598 struct ahash_edesc *edesc;
3599 struct dpaa2_sg_entry *sg_table;
3602 /* allocate space for base edesc and link tables */
3603 edesc = qi_cache_zalloc(GFP_DMA | flags);
3607 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3608 sg_table = &edesc->sgt[0];
3610 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3615 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3619 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3621 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3623 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3624 dev_err(ctx->dev, "unable to map S/G table\n");
3628 edesc->qm_sg_bytes = qm_sg_bytes;
3630 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3631 dpaa2_fl_set_final(in_fle, true);
3632 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3633 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3634 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3635 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3636 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3637 dpaa2_fl_set_len(out_fle, digestsize);
3639 req_ctx->flc = &ctx->flc[FINALIZE];
3640 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3641 req_ctx->cbk = ahash_done_ctx_src;
3642 req_ctx->ctx = &req->base;
3643 req_ctx->edesc = edesc;
3645 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3646 if (ret == -EINPROGRESS ||
3647 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3651 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3652 qi_cache_free(edesc);
3656 static int ahash_finup_ctx(struct ahash_request *req)
3658 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3659 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3660 struct caam_hash_state *state = ahash_request_ctx(req);
3661 struct caam_request *req_ctx = &state->caam_req;
3662 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3663 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3664 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3665 GFP_KERNEL : GFP_ATOMIC;
3666 int buflen = *current_buflen(state);
3667 int qm_sg_bytes, qm_sg_src_index;
3668 int src_nents, mapped_nents;
3669 int digestsize = crypto_ahash_digestsize(ahash);
3670 struct ahash_edesc *edesc;
3671 struct dpaa2_sg_entry *sg_table;
3674 src_nents = sg_nents_for_len(req->src, req->nbytes);
3675 if (src_nents < 0) {
3676 dev_err(ctx->dev, "Invalid number of src SG.\n");
3681 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3683 if (!mapped_nents) {
3684 dev_err(ctx->dev, "unable to DMA map source\n");
3691 /* allocate space for base edesc and link tables */
3692 edesc = qi_cache_zalloc(GFP_DMA | flags);
3694 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3698 edesc->src_nents = src_nents;
3699 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3700 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3702 sg_table = &edesc->sgt[0];
3704 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3709 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3713 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3715 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3717 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3718 dev_err(ctx->dev, "unable to map S/G table\n");
3722 edesc->qm_sg_bytes = qm_sg_bytes;
3724 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3725 dpaa2_fl_set_final(in_fle, true);
3726 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3727 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3728 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3729 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3730 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3731 dpaa2_fl_set_len(out_fle, digestsize);
3733 req_ctx->flc = &ctx->flc[FINALIZE];
3734 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3735 req_ctx->cbk = ahash_done_ctx_src;
3736 req_ctx->ctx = &req->base;
3737 req_ctx->edesc = edesc;
3739 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3740 if (ret == -EINPROGRESS ||
3741 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3745 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3746 qi_cache_free(edesc);
3750 static int ahash_digest(struct ahash_request *req)
3752 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3753 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3754 struct caam_hash_state *state = ahash_request_ctx(req);
3755 struct caam_request *req_ctx = &state->caam_req;
3756 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3757 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3758 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3759 GFP_KERNEL : GFP_ATOMIC;
3760 int digestsize = crypto_ahash_digestsize(ahash);
3761 int src_nents, mapped_nents;
3762 struct ahash_edesc *edesc;
3767 src_nents = sg_nents_for_len(req->src, req->nbytes);
3768 if (src_nents < 0) {
3769 dev_err(ctx->dev, "Invalid number of src SG.\n");
3774 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3776 if (!mapped_nents) {
3777 dev_err(ctx->dev, "unable to map source for DMA\n");
3784 /* allocate space for base edesc and link tables */
3785 edesc = qi_cache_zalloc(GFP_DMA | flags);
3787 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3791 edesc->src_nents = src_nents;
3792 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3794 if (mapped_nents > 1) {
3796 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3798 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3799 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3800 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3801 qm_sg_bytes, DMA_TO_DEVICE);
3802 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3803 dev_err(ctx->dev, "unable to map S/G table\n");
3806 edesc->qm_sg_bytes = qm_sg_bytes;
3807 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3808 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3810 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3811 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3814 state->ctx_dma_len = digestsize;
3815 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3817 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3818 dev_err(ctx->dev, "unable to map ctx\n");
3823 dpaa2_fl_set_final(in_fle, true);
3824 dpaa2_fl_set_len(in_fle, req->nbytes);
3825 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3826 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3827 dpaa2_fl_set_len(out_fle, digestsize);
3829 req_ctx->flc = &ctx->flc[DIGEST];
3830 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3831 req_ctx->cbk = ahash_done;
3832 req_ctx->ctx = &req->base;
3833 req_ctx->edesc = edesc;
3834 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3835 if (ret == -EINPROGRESS ||
3836 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3840 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3841 qi_cache_free(edesc);
3845 static int ahash_final_no_ctx(struct ahash_request *req)
3847 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3848 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3849 struct caam_hash_state *state = ahash_request_ctx(req);
3850 struct caam_request *req_ctx = &state->caam_req;
3851 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3852 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3853 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3854 GFP_KERNEL : GFP_ATOMIC;
3855 u8 *buf = current_buf(state);
3856 int buflen = *current_buflen(state);
3857 int digestsize = crypto_ahash_digestsize(ahash);
3858 struct ahash_edesc *edesc;
3861 /* allocate space for base edesc and link tables */
3862 edesc = qi_cache_zalloc(GFP_DMA | flags);
3867 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3869 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3870 dev_err(ctx->dev, "unable to map src\n");
3875 state->ctx_dma_len = digestsize;
3876 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3878 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3879 dev_err(ctx->dev, "unable to map ctx\n");
3884 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3885 dpaa2_fl_set_final(in_fle, true);
3887 * The crypto engine requires the input entry to be present when
3888 * a "frame list" FD is used.
3889 * Since the engine does not support FMT=2'b11 (unused entry type),
3890 * leaving in_fle zeroized (except for the "Final" flag) is the best option.
3893 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3894 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3895 dpaa2_fl_set_len(in_fle, buflen);
3897 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3898 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3899 dpaa2_fl_set_len(out_fle, digestsize);
3901 req_ctx->flc = &ctx->flc[DIGEST];
3902 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3903 req_ctx->cbk = ahash_done;
3904 req_ctx->ctx = &req->base;
3905 req_ctx->edesc = edesc;
3907 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3908 if (ret == -EINPROGRESS ||
3909 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3913 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3914 qi_cache_free(edesc);
3918 static int ahash_update_no_ctx(struct ahash_request *req)
3920 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3921 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3922 struct caam_hash_state *state = ahash_request_ctx(req);
3923 struct caam_request *req_ctx = &state->caam_req;
3924 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3925 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3926 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3927 GFP_KERNEL : GFP_ATOMIC;
3928 u8 *buf = current_buf(state);
3929 int *buflen = current_buflen(state);
3930 u8 *next_buf = alt_buf(state);
3931 int *next_buflen = alt_buflen(state);
3932 int in_len = *buflen + req->nbytes, to_hash;
3933 int qm_sg_bytes, src_nents, mapped_nents;
3934 struct ahash_edesc *edesc;
3937 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3938 to_hash = in_len - *next_buflen;
3941 struct dpaa2_sg_entry *sg_table;
3942 int src_len = req->nbytes - *next_buflen;
3944 src_nents = sg_nents_for_len(req->src, src_len);
3945 if (src_nents < 0) {
3946 dev_err(ctx->dev, "Invalid number of src SG.\n");
3951 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3953 if (!mapped_nents) {
3954 dev_err(ctx->dev, "unable to DMA map source\n");
3961 /* allocate space for base edesc and link tables */
3962 edesc = qi_cache_zalloc(GFP_DMA | flags);
3964 dma_unmap_sg(ctx->dev, req->src, src_nents,
3969 edesc->src_nents = src_nents;
3970 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
3972 sg_table = &edesc->sgt[0];
3974 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
3978 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
3981 scatterwalk_map_and_copy(next_buf, req->src,
3985 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3986 qm_sg_bytes, DMA_TO_DEVICE);
3987 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3988 dev_err(ctx->dev, "unable to map S/G table\n");
3992 edesc->qm_sg_bytes = qm_sg_bytes;
3994 state->ctx_dma_len = ctx->ctx_len;
3995 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
3996 ctx->ctx_len, DMA_FROM_DEVICE);
3997 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3998 dev_err(ctx->dev, "unable to map ctx\n");
4004 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4005 dpaa2_fl_set_final(in_fle, true);
4006 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4007 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4008 dpaa2_fl_set_len(in_fle, to_hash);
4009 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4010 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4011 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4013 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4014 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4015 req_ctx->cbk = ahash_done_ctx_dst;
4016 req_ctx->ctx = &req->base;
4017 req_ctx->edesc = edesc;
4019 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4020 if (ret != -EINPROGRESS &&
4022 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4025 state->update = ahash_update_ctx;
4026 state->finup = ahash_finup_ctx;
4027 state->final = ahash_final_ctx;
4028 } else if (*next_buflen) {
4029 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4031 *buflen = *next_buflen;
4035 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4036 DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
4037 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4038 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4043 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4044 qi_cache_free(edesc);
4048 static int ahash_finup_no_ctx(struct ahash_request *req)
4050 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4051 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4052 struct caam_hash_state *state = ahash_request_ctx(req);
4053 struct caam_request *req_ctx = &state->caam_req;
4054 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4055 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4056 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4057 GFP_KERNEL : GFP_ATOMIC;
4058 int buflen = *current_buflen(state);
4059 int qm_sg_bytes, src_nents, mapped_nents;
4060 int digestsize = crypto_ahash_digestsize(ahash);
4061 struct ahash_edesc *edesc;
4062 struct dpaa2_sg_entry *sg_table;
4065 src_nents = sg_nents_for_len(req->src, req->nbytes);
4066 if (src_nents < 0) {
4067 dev_err(ctx->dev, "Invalid number of src SG.\n");
4072 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4074 if (!mapped_nents) {
4075 dev_err(ctx->dev, "unable to DMA map source\n");
4082 /* allocate space for base edesc and link tables */
4083 edesc = qi_cache_zalloc(GFP_DMA | flags);
4085 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4089 edesc->src_nents = src_nents;
4090 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4091 sg_table = &edesc->sgt[0];
4093 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4097 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4099 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4101 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4102 dev_err(ctx->dev, "unable to map S/G table\n");
4106 edesc->qm_sg_bytes = qm_sg_bytes;
4108 state->ctx_dma_len = digestsize;
4109 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4111 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4112 dev_err(ctx->dev, "unable to map ctx\n");
4118 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4119 dpaa2_fl_set_final(in_fle, true);
4120 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4121 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4122 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4123 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4124 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4125 dpaa2_fl_set_len(out_fle, digestsize);
4127 req_ctx->flc = &ctx->flc[DIGEST];
4128 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4129 req_ctx->cbk = ahash_done;
4130 req_ctx->ctx = &req->base;
4131 req_ctx->edesc = edesc;
4132 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4133 if (ret != -EINPROGRESS &&
4134 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4139 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4140 qi_cache_free(edesc);
4144 static int ahash_update_first(struct ahash_request *req)
4146 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4147 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4148 struct caam_hash_state *state = ahash_request_ctx(req);
4149 struct caam_request *req_ctx = &state->caam_req;
4150 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4151 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4152 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4153 GFP_KERNEL : GFP_ATOMIC;
4154 u8 *next_buf = alt_buf(state);
4155 int *next_buflen = alt_buflen(state);
4157 int src_nents, mapped_nents;
4158 struct ahash_edesc *edesc;
4161 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4163 to_hash = req->nbytes - *next_buflen;
4166 struct dpaa2_sg_entry *sg_table;
4167 int src_len = req->nbytes - *next_buflen;
4169 src_nents = sg_nents_for_len(req->src, src_len);
4170 if (src_nents < 0) {
4171 dev_err(ctx->dev, "Invalid number of src SG.\n");
4176 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4178 if (!mapped_nents) {
4179 dev_err(ctx->dev, "unable to map source for DMA\n");
4186 /* allocate space for base edesc and link tables */
4187 edesc = qi_cache_zalloc(GFP_DMA | flags);
4189 dma_unmap_sg(ctx->dev, req->src, src_nents,
4194 edesc->src_nents = src_nents;
4195 sg_table = &edesc->sgt[0];
4197 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4198 dpaa2_fl_set_final(in_fle, true);
4199 dpaa2_fl_set_len(in_fle, to_hash);
4201 if (mapped_nents > 1) {
4204 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4205 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4207 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4210 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4211 dev_err(ctx->dev, "unable to map S/G table\n");
4215 edesc->qm_sg_bytes = qm_sg_bytes;
4216 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4217 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4219 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4220 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4224 scatterwalk_map_and_copy(next_buf, req->src, to_hash,
4227 state->ctx_dma_len = ctx->ctx_len;
4228 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4229 ctx->ctx_len, DMA_FROM_DEVICE);
4230 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4231 dev_err(ctx->dev, "unable to map ctx\n");
4237 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4238 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4239 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4241 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4242 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4243 req_ctx->cbk = ahash_done_ctx_dst;
4244 req_ctx->ctx = &req->base;
4245 req_ctx->edesc = edesc;
4247 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4248 if (ret != -EINPROGRESS &&
4249 !(ret == -EBUSY && req->base.flags &
4250 CRYPTO_TFM_REQ_MAY_BACKLOG))
4253 state->update = ahash_update_ctx;
4254 state->finup = ahash_finup_ctx;
4255 state->final = ahash_final_ctx;
4256 } else if (*next_buflen) {
4257 state->update = ahash_update_no_ctx;
4258 state->finup = ahash_finup_no_ctx;
4259 state->final = ahash_final_no_ctx;
4260 scatterwalk_map_and_copy(next_buf, req->src, 0,
4265 print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
4266 DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
4271 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4272 qi_cache_free(edesc);
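/*
 * With no running context and no buffered data yet, finup degenerates to
 * a one-shot digest operation.
 */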
4276 static int ahash_finup_first(struct ahash_request *req)
4278 return ahash_digest(req);
4281 static int ahash_init(struct ahash_request *req)
4283 struct caam_hash_state *state = ahash_request_ctx(req);
4285 state->update = ahash_update_first;
4286 state->finup = ahash_finup_first;
4287 state->final = ahash_final_no_ctx;
4290 state->ctx_dma_len = 0;
4291 state->current_buf = 0;
4293 state->buflen_0 = 0;
4294 state->buflen_1 = 0;
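/*
 * The update/finup/final hooks in caam_hash_state form a small state
 * machine: ahash_init() installs the first-pass entry points; once
 * ahash_update_first() has pushed a full block through the engine, a
 * running context exists and the hooks switch to the *_ctx variants,
 * otherwise the no-context variants keep buffering.
 */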
4299 static int ahash_update(struct ahash_request *req)
4301 struct caam_hash_state *state = ahash_request_ctx(req);
4303 return state->update(req);
4306 static int ahash_finup(struct ahash_request *req)
4308 struct caam_hash_state *state = ahash_request_ctx(req);
4310 return state->finup(req);
4313 static int ahash_final(struct ahash_request *req)
4315 struct caam_hash_state *state = ahash_request_ctx(req);
4317 return state->final(req);
4320 static int ahash_export(struct ahash_request *req, void *out)
4322 struct caam_hash_state *state = ahash_request_ctx(req);
4323 struct caam_export_state *export = out;
4327 if (state->current_buf) {
4329 len = state->buflen_1;
4332 len = state->buflen_0;
4335 memcpy(export->buf, buf, len);
4336 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4337 export->buflen = len;
4338 export->update = state->update;
4339 export->final = state->final;
4340 export->finup = state->finup;
4345 static int ahash_import(struct ahash_request *req, const void *in)
4347 struct caam_hash_state *state = ahash_request_ctx(req);
4348 const struct caam_export_state *export = in;
4350 memset(state, 0, sizeof(*state));
4351 memcpy(state->buf_0, export->buf, export->buflen);
4352 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4353 state->buflen_0 = export->buflen;
4354 state->update = export->update;
4355 state->final = export->final;
4356 state->finup = export->finup;
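/*
 * Illustrative sketch, not part of this driver: how a user of the generic
 * ahash API could exercise the export/import path implemented above to
 * checkpoint a running hash and resume it later. All names below are
 * hypothetical, "sha256" is an arbitrary algorithm choice, and the snippet
 * assumes <crypto/hash.h>, <linux/err.h> and <linux/slab.h>. The digest
 * buffer must hold crypto_ahash_digestsize(tfm) bytes.
 */
static int example_checkpointed_hash(struct scatterlist *sg1,
				     unsigned int len1,
				     struct scatterlist *sg2,
				     unsigned int len2, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	void *state;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	/* State blob size is advertised via statesize (caam_export_state) */
	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto out_req;
	}

	/* Hash the first chunk, then checkpoint the running state */
	ahash_request_set_crypt(req, sg1, NULL, len1);
	ret = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (ret)
		goto out_state;
	ret = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (ret)
		goto out_state;
	ret = crypto_ahash_export(req, state);
	if (ret)
		goto out_state;

	/* ...later: restore the checkpoint and finish with chunk two */
	ret = crypto_ahash_import(req, state);
	if (ret)
		goto out_state;
	ahash_request_set_crypt(req, sg2, digest, len2);
	ret = crypto_wait_req(crypto_ahash_finup(req), &wait);

out_state:
	kfree(state);
out_req:
	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return ret;
}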
4361 struct caam_hash_template {
4362 char name[CRYPTO_MAX_ALG_NAME];
4363 char driver_name[CRYPTO_MAX_ALG_NAME];
4364 char hmac_name[CRYPTO_MAX_ALG_NAME];
4365 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4366 unsigned int blocksize;
4367 struct ahash_alg template_ahash;
4371 /* ahash algorithm templates */
4372 static struct caam_hash_template driver_hash[] = {
4375 .driver_name = "sha1-caam-qi2",
4376 .hmac_name = "hmac(sha1)",
4377 .hmac_driver_name = "hmac-sha1-caam-qi2",
4378 .blocksize = SHA1_BLOCK_SIZE,
4381 .update = ahash_update,
4382 .final = ahash_final,
4383 .finup = ahash_finup,
4384 .digest = ahash_digest,
4385 .export = ahash_export,
4386 .import = ahash_import,
4387 .setkey = ahash_setkey,
4389 .digestsize = SHA1_DIGEST_SIZE,
4390 .statesize = sizeof(struct caam_export_state),
4393 .alg_type = OP_ALG_ALGSEL_SHA1,
4396 .driver_name = "sha224-caam-qi2",
4397 .hmac_name = "hmac(sha224)",
4398 .hmac_driver_name = "hmac-sha224-caam-qi2",
4399 .blocksize = SHA224_BLOCK_SIZE,
4402 .update = ahash_update,
4403 .final = ahash_final,
4404 .finup = ahash_finup,
4405 .digest = ahash_digest,
4406 .export = ahash_export,
4407 .import = ahash_import,
4408 .setkey = ahash_setkey,
4410 .digestsize = SHA224_DIGEST_SIZE,
4411 .statesize = sizeof(struct caam_export_state),
4414 .alg_type = OP_ALG_ALGSEL_SHA224,
4417 .driver_name = "sha256-caam-qi2",
4418 .hmac_name = "hmac(sha256)",
4419 .hmac_driver_name = "hmac-sha256-caam-qi2",
4420 .blocksize = SHA256_BLOCK_SIZE,
4423 .update = ahash_update,
4424 .final = ahash_final,
4425 .finup = ahash_finup,
4426 .digest = ahash_digest,
4427 .export = ahash_export,
4428 .import = ahash_import,
4429 .setkey = ahash_setkey,
4431 .digestsize = SHA256_DIGEST_SIZE,
4432 .statesize = sizeof(struct caam_export_state),
4435 .alg_type = OP_ALG_ALGSEL_SHA256,
4438 .driver_name = "sha384-caam-qi2",
4439 .hmac_name = "hmac(sha384)",
4440 .hmac_driver_name = "hmac-sha384-caam-qi2",
4441 .blocksize = SHA384_BLOCK_SIZE,
4444 .update = ahash_update,
4445 .final = ahash_final,
4446 .finup = ahash_finup,
4447 .digest = ahash_digest,
4448 .export = ahash_export,
4449 .import = ahash_import,
4450 .setkey = ahash_setkey,
4452 .digestsize = SHA384_DIGEST_SIZE,
4453 .statesize = sizeof(struct caam_export_state),
4456 .alg_type = OP_ALG_ALGSEL_SHA384,
4459 .driver_name = "sha512-caam-qi2",
4460 .hmac_name = "hmac(sha512)",
4461 .hmac_driver_name = "hmac-sha512-caam-qi2",
4462 .blocksize = SHA512_BLOCK_SIZE,
4465 .update = ahash_update,
4466 .final = ahash_final,
4467 .finup = ahash_finup,
4468 .digest = ahash_digest,
4469 .export = ahash_export,
4470 .import = ahash_import,
4471 .setkey = ahash_setkey,
4473 .digestsize = SHA512_DIGEST_SIZE,
4474 .statesize = sizeof(struct caam_export_state),
4477 .alg_type = OP_ALG_ALGSEL_SHA512,
4480 .driver_name = "md5-caam-qi2",
4481 .hmac_name = "hmac(md5)",
4482 .hmac_driver_name = "hmac-md5-caam-qi2",
4483 .blocksize = MD5_BLOCK_WORDS * 4,
4486 .update = ahash_update,
4487 .final = ahash_final,
4488 .finup = ahash_finup,
4489 .digest = ahash_digest,
4490 .export = ahash_export,
4491 .import = ahash_import,
4492 .setkey = ahash_setkey,
4494 .digestsize = MD5_DIGEST_SIZE,
4495 .statesize = sizeof(struct caam_export_state),
4498 .alg_type = OP_ALG_ALGSEL_MD5,
struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};
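
/*
 * caam_hash_cra_init - one-time hash transform setup: DMA-map the key buffer
 * (for keyed hashes) and the Flow Contexts, look up the MDHA running-digest
 * length for the selected algorithm and build the shared descriptors.
 */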
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	if (alg->setkey) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	return ahash_set_sh_desc(ahash);
}
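
/* caam_hash_cra_exit - undo the DMA mappings created by caam_hash_cra_init */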
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}
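
/*
 * caam_hash_alloc - allocate and fill in a caam_hash_alg from a template,
 * producing either the hmac (keyed) or the unkeyed variant of the algorithm.
 */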
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	 struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}
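
/* FQ Data Availability Notification callback: kick NAPI on the owning cpu */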
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}
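
/*
 * For each online cpu (up to the number of queue pairs), register a
 * notification context with an affine DPIO service and create the dequeue
 * store consumed by the NAPI poll routine.
 */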
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}
static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}
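
/*
 * Configure the Rx queues of the DPSECI object so that each queue notifies
 * the DPIO affine to its cpu, carrying the notification context as user
 * context.
 */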
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}
static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}
static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
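
/*
 * Process one response frame descriptor: recover the originating request
 * from FD[ADDR] and run its completion callback with the frame status.
 */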
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}
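
/*
 * Consume all dequeue entries currently available in the store and process
 * the frame descriptors they carry; returns the number of frames cleaned.
 */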
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}
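
/*
 * NAPI poll routine: issue volatile dequeues and consume responses until the
 * budget is (nearly) exhausted, then re-arm the notification.
 */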
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}
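
/*
 * Set up Congestion State Change Notifications: allocate and DMA-map the
 * memory area the hardware writes the congestion state to, then configure
 * the notification thresholds (in bytes) on the DPSECI object.
 */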
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
				 GFP_KERNEL | GFP_DMA);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
			       DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}
static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}
static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}
static struct list_head hash_list;
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}
static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}
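
/**
 * dpaa2_caam_enqueue - Enqueue a crypto request to the DPSECI object
 *
 * @dev - device of the DPSECI object the request is enqueued to
 * @req - request with a prepared frame list and completion callback
 *
 * Returns -EINPROGRESS if the frame was enqueued, -EBUSY when the congestion
 * group is congested, or -EIO on DMA mapping / enqueue failure.
 */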
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = this_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.owner		= THIS_MODULE,
	},
	.probe		= dpaa2_caam_probe,
	.remove		= dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);