// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->|  ShareDesc  |
 * | *(packet 1) |                     |  (hashKey)  |
 * ---------------                     | (operation) |
 *                                     ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------                     ---------------
 * | JobDesc #2  |-------------------->|  ShareDesc  |
 * | *(packet 2) |      |------------->|  (hashKey)  |
 * ---------------      |    |-------->| (operation) |
 *       .              |    |         | (load ctx2) |
 *       .              |    |         ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
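
/*
 * Sketch of how such a job descriptor is assembled with the desc_constr.h
 * helpers used throughout this file (dst_dma/src_dma/dst_len/src_len are
 * placeholder names, not driver variables):
 *
 *      init_job_desc_shared(desc, sh_desc_dma, desc_len(sh_desc),
 *                           HDR_SHARE_DEFER | HDR_REVERSE);
 *      append_seq_out_ptr(desc, dst_dma, dst_len, 0);
 *      append_seq_in_ptr(desc, src_dma, src_len, 0);
 *
 * The finished descriptor is then handed to a job ring via
 * caam_jr_enqueue().
 */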

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"

#define CAAM_CRA_PRIORITY               3000

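/*
 * Priority 3000 makes these implementations win crypto API algorithm
 * selection over the software generics (priority ~100). A consumer-side
 * sketch of the standard kernel ahash API that ultimately drives this
 * driver (done_cb, cb_ctx, sg and digest are placeholder names):
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                 done_cb, cb_ctx);
 *      ahash_request_set_crypt(req, sg, digest, nbytes);
 *      err = crypto_ahash_digest(req);  (may return -EINPROGRESS)
 */
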
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE       SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES        (DESC_AHASH_FINAL_LEN + \
                                         CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN          (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN                    8
#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
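
/*
 * Example: for sha256 the per-request context works out to HASH_MSG_LEN +
 * SHA256_DIGEST_SIZE = 8 + 32 = 40 bytes, i.e. the intermediate digest
 * plus a 64-bit running message length. MAX_CTX_LEN just sizes the buffer
 * for the largest case (sha512).
 */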

static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
        u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
        dma_addr_t sh_desc_update_dma ____cacheline_aligned;
        dma_addr_t sh_desc_update_first_dma;
        dma_addr_t sh_desc_fin_dma;
        dma_addr_t sh_desc_digest_dma;
        enum dma_data_direction dir;
        enum dma_data_direction key_dir;
        struct device *jrdev;
        int ctx_len;
        struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
        dma_addr_t buf_dma;
        dma_addr_t ctx_dma;
        int ctx_dma_len;
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen;
        int next_buflen;
        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};

struct caam_export_state {
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
        u8 caam_ctx[MAX_CTX_LEN];
        int buflen;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};
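
/*
 * caam_export_state mirrors caam_hash_state minus the DMA bookkeeping:
 * the driver's ahash export/import hooks snapshot the pending buffer, the
 * hardware context and the current handler set, so an in-flight hash can
 * be frozen and later resumed.
 */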

static inline bool is_cmac_aes(u32 algtype)
{
        return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
               (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
                                      struct caam_hash_state *state,
                                      int ctx_len)
{
        state->ctx_dma_len = ctx_len;
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

        return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
                                     struct sec4_sg_entry *sec4_sg,
                                     struct caam_hash_state *state)
{
        int buflen = state->buflen;

        if (!buflen)
                return 0;

        state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, state->buf_dma)) {
                dev_err(jrdev, "unable to map buf\n");
                state->buf_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

        return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
                                     struct caam_hash_state *state, int ctx_len,
                                     struct sec4_sg_entry *sec4_sg, u32 flag)
{
        state->ctx_dma_len = ctx_len;
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
        if (dma_mapping_error(jrdev, state->ctx_dma)) {
                dev_err(jrdev, "unable to map ctx\n");
                state->ctx_dma = 0;
                return -ENOMEM;
        }

        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

        return 0;
}

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 *desc;

        ctx->adata.key_virt = ctx->key;

        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
                          ctx->ctx_len, true, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);

        print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                          ctx->ctx_len, false, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
                             ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
                          ctx->ctx_len, true, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);

        print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* ahash_digest shared descriptor */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
                          ctx->ctx_len, false, ctrlpriv->era);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);

        print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        u32 *desc;

        /* shared descriptor for ahash_update */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
                            ctx->ctx_len, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* shared descriptor for ahash_{final,finup} */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* key is immediate data for INIT and INITFINAL states */
        ctx->adata.key_virt = ctx->key;

        /* shared descriptor for first invocation of ahash_update */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                            ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
                             " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for ahash_digest */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);
        return 0;
}

static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
        u32 *desc;

        /* shared descriptor for ahash_update */
        desc = ctx->sh_desc_update;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
                            ctx->ctx_len, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for ahash_{final,finup} */
        desc = ctx->sh_desc_fin;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for first invocation of ahash_update */
        desc = ctx->sh_desc_update_first;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                            ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
                             " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        /* shared descriptor for ahash_digest */
        desc = ctx->sh_desc_digest;
        cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
                            digestsize, ctx->ctx_len);
        dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
                                   desc_bytes(desc), ctx->dir);
        print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc,
                             desc_bytes(desc), 1);

        return 0;
}

/* Digest the key if it is longer than the algorithm's block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
                           u32 digestsize)
{
        struct device *jrdev = ctx->jrdev;
        u32 *desc;
        struct split_key_result result;
        dma_addr_t key_dma;
        int ret;

        desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
        if (!desc) {
                dev_err(jrdev, "unable to allocate key input memory\n");
                return -ENOMEM;
        }

        init_job_desc(desc, 0);

        key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(jrdev, key_dma)) {
                dev_err(jrdev, "unable to map key memory\n");
                kfree(desc);
                return -ENOMEM;
        }

        /* Job descriptor to perform unkeyed hash on key_in */
        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
                         OP_ALG_AS_INITFINAL);
        append_seq_in_ptr(desc, key_dma, *keylen, 0);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
        append_seq_out_ptr(desc, key_dma, digestsize, 0);
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        result.err = 0;
        init_completion(&result.completion);

        ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
        if (!ret) {
                /* in progress */
                wait_for_completion(&result.completion);
                ret = result.err;

                print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, key,
                                     digestsize, 1);
        }
        dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

        *keylen = digestsize;

        kfree(desc);

        return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct device *jrdev = ctx->jrdev;
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
        int ret;
        u8 *hashed_key = NULL;

        dev_dbg(jrdev, "keylen %d\n", keylen);

        if (keylen > blocksize) {
                hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }
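
        /*
         * Pre-hashing an overlong key mirrors the HMAC definition
         * (RFC 2104): a key longer than the block size is replaced by
         * its digest, e.g. a 100-byte hmac(sha256) key (64-byte blocks)
         * is reduced to its 32-byte SHA-256 digest before the split key
         * is derived.
         */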

        /*
         * If DKP is supported, use it in the shared descriptor to generate
         * the split key.
         */
        if (ctrlpriv->era >= 6) {
                ctx->adata.key_inline = true;
                ctx->adata.keylen = keylen;
                ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                                      OP_ALG_ALGSEL_MASK);

                if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                        goto bad_free_key;

                memcpy(ctx->key, key, keylen);

                /*
                 * In case |user key| > |derived key|, using DKP<imm,imm>
                 * would result in invalid opcodes (last bytes of user key) in
                 * the resulting descriptor. Use DKP<ptr,imm> instead => both
                 * virtual and dma key addresses are needed.
                 */
                if (keylen > ctx->adata.keylen_pad)
                        dma_sync_single_for_device(ctx->jrdev,
                                                   ctx->adata.key_dma,
                                                   ctx->adata.keylen_pad,
                                                   DMA_TO_DEVICE);
        } else {
                ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
                                    keylen, CAAM_MAX_HASH_KEY_SIZE);
                if (ret)
                        goto bad_free_key;
        }

        kfree(hashed_key);
        return ahash_set_sh_desc(ahash);
 bad_free_key:
        kfree(hashed_key);
        return -EINVAL;
}

static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
                        unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct device *jrdev = ctx->jrdev;

        if (keylen != AES_KEYSIZE_128)
                return -EINVAL;

        memcpy(ctx->key, key, keylen);
        dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
                                   DMA_TO_DEVICE);
        ctx->adata.keylen = keylen;

        print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

        return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
                        unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int err;

        err = aes_check_keylen(keylen);
        if (err)
                return err;

        /* key is immediate data for all cmac shared descriptors */
        ctx->adata.key_virt = key;
        ctx->adata.keylen = keylen;

        print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

        return acmac_set_sh_desc(ahash);
}
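
/*
 * Note how the three setkey flavors differ in where the key ends up:
 * hmac (ahash_setkey) derives a split key (inline or DMA-mapped via DKP),
 * xcbc copies the raw key into the DMA-synced ctx->key backing store, and
 * cmac keeps it as immediate data referenced straight from the caller's
 * buffer.
 */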

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
        dma_addr_t sec4_sg_dma;
        int src_nents;
        int sec4_sg_bytes;
        u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
        struct sec4_sg_entry sec4_sg[];
};
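
/*
 * sec4_sg is a flexible array member: ahash_edesc_alloc() obtains the job
 * descriptor and its link table with a single
 * kzalloc(sizeof(*edesc) + sg_size), so one allocation (and one kfree)
 * covers both.
 */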

static inline void ahash_unmap(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma,
                                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

        if (state->buf_dma) {
                dma_unmap_single(dev, state->buf_dma, state->buflen,
                                 DMA_TO_DEVICE);
                state->buf_dma = 0;
        }
}

static inline void ahash_unmap_ctx(struct device *dev,
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len, u32 flag)
{
        struct caam_hash_state *state = ahash_request_ctx(req);

        if (state->ctx_dma) {
                dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
                state->ctx_dma = 0;
        }
        ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
                       void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;

        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
        memcpy(req->result, state->caam_ctx, digestsize);
        kfree(edesc);

        print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);

        req->base.complete(&req->base, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
                            void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;

        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        kfree(edesc);

        scatterwalk_map_and_copy(state->buf, req->src,
                                 req->nbytes - state->next_buflen,
                                 state->next_buflen, 0);
        state->buflen = state->next_buflen;

        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
                             state->buflen, 1);

        print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     digestsize, 1);

        req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;

        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        memcpy(req->result, state->caam_ctx, digestsize);
        kfree(edesc);

        print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);

        req->base.complete(&req->base, ecode);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
                               void *context)
{
        struct ahash_request *req = context;
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;

        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

        edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);

        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
        kfree(edesc);

        scatterwalk_map_and_copy(state->buf, req->src,
                                 req->nbytes - state->next_buflen,
                                 state->next_buflen, 0);
        state->buflen = state->next_buflen;

        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
                             state->buflen, 1);

        print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
                             ctx->ctx_len, 1);
        if (req->result)
                print_hex_dump_debug("result@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
                                     digestsize, 1);

        req->base.complete(&req->base, ecode);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
                                             int sg_num, u32 *sh_desc,
                                             dma_addr_t sh_desc_dma,
                                             gfp_t flags)
{
        struct ahash_edesc *edesc;
        unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);

        edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
        if (!edesc) {
                dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
                return NULL;
        }

        init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
                             HDR_SHARE_DEFER | HDR_REVERSE);

        return edesc;
}

static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req, int nents,
                               unsigned int first_sg,
                               unsigned int first_bytes, size_t to_hash)
{
        dma_addr_t src_dma;
        u32 options;

        if (nents > 1 || first_sg) {
                struct sec4_sg_entry *sg = edesc->sec4_sg;
                unsigned int sgsize = sizeof(*sg) *
                                      pad_sg_nents(first_sg + nents);

                sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

                src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
                if (dma_mapping_error(ctx->jrdev, src_dma)) {
                        dev_err(ctx->jrdev, "unable to map S/G table\n");
                        return -ENOMEM;
                }

                edesc->sec4_sg_bytes = sgsize;
                edesc->sec4_sg_dma = src_dma;
                options = LDST_SGF;
        } else {
                src_dma = sg_dma_address(req->src);
                options = 0;
        }

        append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
                          options);

        return 0;
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = state->buf;
        int *buflen = &state->buflen;
        int *next_buflen = &state->next_buflen;
        int blocksize = crypto_ahash_blocksize(ahash);
        int in_len = *buflen + req->nbytes, to_hash;
        u32 *desc;
        int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
        struct ahash_edesc *edesc;
        int ret = 0;

        *next_buflen = in_len & (blocksize - 1);
        to_hash = in_len - *next_buflen;

        /*
         * For XCBC and CMAC, if to_hash is a multiple of the block size,
         * keep the last block in the internal buffer
         */
        if ((is_xcbc_aes(ctx->adata.algtype) ||
             is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
             (*next_buflen == 0)) {
                *next_buflen = blocksize;
                to_hash -= blocksize;
        }
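
        /*
         * Worked example: with a 16-byte block size, an empty buffer and
         * req->nbytes == 32, next_buflen is 0 and to_hash is 32; the
         * branch above then forces next_buflen to 16 and trims to_hash to
         * 16, so the final block stays buffered for final()/finup().
         */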

        if (to_hash) {
                int pad_nents;
                int src_len = req->nbytes - *next_buflen;

                src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
                pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
                sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);
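
                /*
                 * Link table layout from here on (sketch): entry 0 holds
                 * the running context, entry 1 the buffered bytes (when
                 * buflen != 0), then the source scatterlist, padded out
                 * by pad_sg_nents() to the entry count the CAAM expects.
                 */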

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
                                          ctx->sh_desc_update_dma, flags);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                         edesc->sec4_sg, DMA_BIDIRECTIONAL);
                if (ret)
                        goto unmap_ctx;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
                if (ret)
                        goto unmap_ctx;

                if (mapped_nents)
                        sg_to_sec4_sg_last(req->src, src_len,
                                           edesc->sec4_sg + sec4_sg_src_index,
                                           0);
                else
                        sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
                                            1);

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                                       to_hash, LDST_SGF);

                append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

                print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
                                     desc_bytes(desc), 1);

                ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
                if (ret)
                        goto unmap_ctx;

                ret = -EINPROGRESS;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;

                print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, buf,
                                     *buflen, 1);
        }

        return ret;
unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = state->buflen;
        u32 *desc;
        int sec4_sg_bytes;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
                        sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
                                  ctx->sh_desc_fin_dma, flags);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                dev_err(jrdev, "unable to map S/G table\n");
                ret = -ENOMEM;
                goto unmap_ctx;
        }

        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);
        append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
        if (ret)
                goto unmap_ctx;

        return -EINPROGRESS;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = state->buflen;
        u32 *desc;
        int sec4_sg_src_index;
        int src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 1 + (buflen ? 1 : 0);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;

        ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
                                 edesc->sec4_sg, DMA_BIDIRECTIONAL);
        if (ret)
                goto unmap_ctx;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
        if (ret)
                goto unmap_ctx;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                                  sec4_sg_src_index, ctx->ctx_len + buflen,
                                  req->nbytes);
        if (ret)
                goto unmap_ctx;

        append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
        if (ret)
                goto unmap_ctx;

        return -EINPROGRESS;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
        kfree(edesc);
        return ret;
}

static int ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        int src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret;

        state->buf_dma = 0;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to map source for DMA\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        edesc->src_nents = src_nents;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                                  req->nbytes);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return ret;
        }

        desc = edesc->hw_desc;

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret) {
                ahash_unmap(jrdev, edesc, req, digestsize);
                kfree(edesc);
                return -ENOMEM;
        }

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                kfree(edesc);
        }

        return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = state->buf;
        int buflen = state->buflen;
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
                                  ctx->sh_desc_digest_dma, flags);
        if (!edesc)
                return -ENOMEM;

        desc = edesc->hw_desc;

        if (buflen) {
                state->buf_dma = dma_map_single(jrdev, buf, buflen,
                                                DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, state->buf_dma)) {
                        dev_err(jrdev, "unable to map src\n");
                        goto unmap;
                }

                append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
        }

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret)
                goto unmap;

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                kfree(edesc);
        }

        return ret;
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor (no hash context yet) */
static int ahash_update_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = state->buf;
        int *buflen = &state->buflen;
        int *next_buflen = &state->next_buflen;
        int blocksize = crypto_ahash_blocksize(ahash);
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        u32 *desc;
        int ret = 0;

        *next_buflen = in_len & (blocksize - 1);
        to_hash = in_len - *next_buflen;

        /*
         * For XCBC and CMAC, if to_hash is a multiple of the block size,
         * keep the last block in the internal buffer
         */
        if ((is_xcbc_aes(ctx->adata.algtype) ||
             is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
             (*next_buflen == 0)) {
                *next_buflen = blocksize;
                to_hash -= blocksize;
        }

        if (to_hash) {
                int pad_nents;
                int src_len = req->nbytes - *next_buflen;

                src_nents = sg_nents_for_len(req->src, src_len);
                if (src_nents < 0) {
                        dev_err(jrdev, "Invalid number of src SG.\n");
                        return src_nents;
                }

                if (src_nents) {
                        mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                                  DMA_TO_DEVICE);
                        if (!mapped_nents) {
                                dev_err(jrdev, "unable to DMA map source\n");
                                return -ENOMEM;
                        }
                } else {
                        mapped_nents = 0;
                }

                pad_nents = pad_sg_nents(1 + mapped_nents);
                sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

                /*
                 * allocate space for base edesc and hw desc commands,
                 * link tables
                 */
                edesc = ahash_edesc_alloc(ctx, pad_nents,
                                          ctx->sh_desc_update_first,
                                          ctx->sh_desc_update_first_dma,
                                          flags);
                if (!edesc) {
                        dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                        return -ENOMEM;
                }

                edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;

                ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
                if (ret)
                        goto unmap_ctx;

                sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

                desc = edesc->hw_desc;

                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                                    sec4_sg_bytes,
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
                        dev_err(jrdev, "unable to map S/G table\n");
                        ret = -ENOMEM;
                        goto unmap_ctx;
                }

                append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

                ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
                if (ret)
                        goto unmap_ctx;

                print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
                                     desc_bytes(desc), 1);

                ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
                if (ret)
                        goto unmap_ctx;

                ret = -EINPROGRESS;
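
                /*
                 * A hardware context now lives in state->caam_ctx, so
                 * steer subsequent requests to the ctx-based handlers;
                 * the *_no_ctx variants only run before the first block
                 * reaches the engine.
                 */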
                state->update = ahash_update_ctx;
                state->finup = ahash_finup_ctx;
                state->final = ahash_final_ctx;
        } else if (*next_buflen) {
                scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
                                         req->nbytes, 0);
                *buflen = *next_buflen;

                print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, buf,
                                     *buflen, 1);
        }

        return ret;
 unmap_ctx:
        ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
        kfree(edesc);
        return ret;
}

/* submit ahash finup if it is the first job descriptor (no hash context yet) */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        int buflen = state->buflen;
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret;

        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
                return src_nents;
        }

        if (src_nents) {
                mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
                                          DMA_TO_DEVICE);
                if (!mapped_nents) {
                        dev_err(jrdev, "unable to DMA map source\n");
                        return -ENOMEM;
                }
        } else {
                mapped_nents = 0;
        }

        sec4_sg_src_index = 2;
        sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
                         sizeof(struct sec4_sg_entry);

        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
                                  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
                return -ENOMEM;
        }

        desc = edesc->hw_desc;

        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;

        ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
        if (ret)
                goto unmap;

        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
                                  req->nbytes);
        if (ret) {
                dev_err(jrdev, "unable to map S/G table\n");
                goto unmap;
        }

        ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
        if (ret)
                goto unmap;

        print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
        if (!ret) {
                ret = -EINPROGRESS;
        } else {
                ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
                kfree(edesc);
        }

        return ret;
 unmap:
        ahash_unmap(jrdev, edesc, req, digestsize);
        kfree(edesc);
        return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        u8 *buf = state->buf;
        int *buflen = &state->buflen;
        int *next_buflen = &state->next_buflen;
        int to_hash;
        int blocksize = crypto_ahash_blocksize(ahash);
        u32 *desc;
        int src_nents, mapped_nents;
        struct ahash_edesc *edesc;
        int ret = 0;

        *next_buflen = req->nbytes & (blocksize - 1);
        to_hash = req->nbytes - *next_buflen;

1409         /*
1410           * For XCBC and CMAC, if to_hash is a multiple of the block
1411           * size, keep the last block in the internal buffer.
1412          */
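        /*
         * Worked example (editorial): with AES's 16-byte block, a 64-byte
         * request gives *next_buflen = 64 & 15 = 0 and to_hash = 64; the
         * branch below then holds back the final block, leaving
         * to_hash = 48 and *next_buflen = 16, so the last block stays
         * available for the special last-block processing at finalization.
         */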
1413         if ((is_xcbc_aes(ctx->adata.algtype) ||
1414              is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
1415              (*next_buflen == 0)) {
1416                 *next_buflen = blocksize;
1417                 to_hash -= blocksize;
1418         }
1419
1420         if (to_hash) {
1421                 src_nents = sg_nents_for_len(req->src,
1422                                              req->nbytes - *next_buflen);
1423                 if (src_nents < 0) {
1424                         dev_err(jrdev, "Invalid number of src SG.\n");
1425                         return src_nents;
1426                 }
1427
1428                 if (src_nents) {
1429                         mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
1430                                                   DMA_TO_DEVICE);
1431                         if (!mapped_nents) {
1432                                 dev_err(jrdev, "unable to map source for DMA\n");
1433                                 return -ENOMEM;
1434                         }
1435                 } else {
1436                         mapped_nents = 0;
1437                 }
1438
1439                 /*
1440                  * allocate space for base edesc and hw desc commands,
1441                  * link tables
1442                  */
1443                 edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
1444                                           mapped_nents : 0,
1445                                           ctx->sh_desc_update_first,
1446                                           ctx->sh_desc_update_first_dma,
1447                                           flags);
1448                 if (!edesc) {
1449                         dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
1450                         return -ENOMEM;
1451                 }
1452
1453                 edesc->src_nents = src_nents;
1454
1455                 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
1456                                           to_hash);
1457                 if (ret)
1458                         goto unmap_ctx;
1459
1460                 desc = edesc->hw_desc;
1461
1462                 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
1463                 if (ret)
1464                         goto unmap_ctx;
1465
1466                 print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
1467                                      DUMP_PREFIX_ADDRESS, 16, 4, desc,
1468                                      desc_bytes(desc), 1);
1469
1470                 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
1471                 if (ret)
1472                         goto unmap_ctx;
1473
1474                 ret = -EINPROGRESS;
1475                 state->update = ahash_update_ctx;
1476                 state->finup = ahash_finup_ctx;
1477                 state->final = ahash_final_ctx;
1478         } else if (*next_buflen) {
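                /*
                 * No job was submitted, hence no running context in the
                 * engine: buffer the data and route subsequent operations
                 * through the no-ctx handlers.
                 */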
1479                 state->update = ahash_update_no_ctx;
1480                 state->finup = ahash_finup_no_ctx;
1481                 state->final = ahash_final_no_ctx;
1482                 scatterwalk_map_and_copy(buf, req->src, 0,
1483                                          req->nbytes, 0);
1484                 *buflen = *next_buflen;
1485
1486                 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
1487                                      DUMP_PREFIX_ADDRESS, 16, 4, buf,
1488                                      *buflen, 1);
1489         }
1490
1491         return ret;
1492  unmap_ctx:
1493         ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
1494         kfree(edesc);
1495         return ret;
1496 }
1497
1498 static int ahash_finup_first(struct ahash_request *req)
1499 {
1500         return ahash_digest(req);
1501 }
1502
1503 static int ahash_init(struct ahash_request *req)
1504 {
1505         struct caam_hash_state *state = ahash_request_ctx(req);
1506
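        /*
         * First update/finup go through the first-descriptor paths; final
         * starts as the no-ctx variant because no running context exists
         * in the engine before a first job has been submitted.
         */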
1507         state->update = ahash_update_first;
1508         state->finup = ahash_finup_first;
1509         state->final = ahash_final_no_ctx;
1510
1511         state->ctx_dma = 0;
1512         state->ctx_dma_len = 0;
1513         state->buf_dma = 0;
1514         state->buflen = 0;
1515         state->next_buflen = 0;
1516
1517         return 0;
1518 }
1519
1520 static int ahash_update(struct ahash_request *req)
1521 {
1522         struct caam_hash_state *state = ahash_request_ctx(req);
1523
1524         return state->update(req);
1525 }
1526
1527 static int ahash_finup(struct ahash_request *req)
1528 {
1529         struct caam_hash_state *state = ahash_request_ctx(req);
1530
1531         return state->finup(req);
1532 }
1533
1534 static int ahash_final(struct ahash_request *req)
1535 {
1536         struct caam_hash_state *state = ahash_request_ctx(req);
1537
1538         return state->final(req);
1539 }
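/*
 * Editorial usage sketch (not part of the driver): how a kernel caller
 * would exercise the init/update/final state machine above through the
 * generic ahash API.  "sha256-caam" is this driver's cra_driver_name;
 * "data"/"len" are placeholder inputs and error handling is elided.
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256-caam", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      DECLARE_CRYPTO_WAIT(wait);
 *      struct scatterlist sg;
 *      u8 result[SHA256_DIGEST_SIZE];
 *
 *      sg_init_one(&sg, data, len);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 crypto_req_done, &wait);
 *      ahash_request_set_crypt(req, &sg, result, len);
 *
 *      crypto_wait_req(crypto_ahash_init(req), &wait);    // ahash_init()
 *      crypto_wait_req(crypto_ahash_update(req), &wait);  // ahash_update()
 *      crypto_wait_req(crypto_ahash_final(req), &wait);   // ahash_final()
 *
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 */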
1540
1541 static int ahash_export(struct ahash_request *req, void *out)
1542 {
1543         struct caam_hash_state *state = ahash_request_ctx(req);
1544         struct caam_export_state *export = out;
1545         u8 *buf = state->buf;
1546         int len = state->buflen;
1547
1548         memcpy(export->buf, buf, len);
1549         memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
1550         export->buflen = len;
1551         export->update = state->update;
1552         export->final = state->final;
1553         export->finup = state->finup;
1554
1555         return 0;
1556 }
1557
1558 static int ahash_import(struct ahash_request *req, const void *in)
1559 {
1560         struct caam_hash_state *state = ahash_request_ctx(req);
1561         const struct caam_export_state *export = in;
1562
1563         memset(state, 0, sizeof(*state));
1564         memcpy(state->buf, export->buf, export->buflen);
1565         memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
1566         state->buflen = export->buflen;
1567         state->update = export->update;
1568         state->final = export->final;
1569         state->finup = export->finup;
1570
1571         return 0;
1572 }
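/*
 * Editorial sketch (not part of the driver): export/import above let a
 * caller checkpoint a partial hash and resume it later, possibly on a
 * different request; the blob size is advertised as statesize.  "req2"
 * and "wait" are placeholders, error handling elided.
 *
 *      void *blob = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *
 *      crypto_wait_req(crypto_ahash_update(req), &wait);
 *      crypto_ahash_export(req, blob);           // lands in ahash_export()
 *      ...
 *      crypto_ahash_import(req2, blob);          // lands in ahash_import()
 *      crypto_wait_req(crypto_ahash_final(req2), &wait);
 */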
1573
1574 struct caam_hash_template {
1575         char name[CRYPTO_MAX_ALG_NAME];
1576         char driver_name[CRYPTO_MAX_ALG_NAME];
1577         char hmac_name[CRYPTO_MAX_ALG_NAME];
1578         char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
1579         unsigned int blocksize;
1580         struct ahash_alg template_ahash;
1581         u32 alg_type;
1582 };
1583
1584 /* ahash algorithm templates */
1585 static struct caam_hash_template driver_hash[] = {
1586         {
1587                 .name = "sha1",
1588                 .driver_name = "sha1-caam",
1589                 .hmac_name = "hmac(sha1)",
1590                 .hmac_driver_name = "hmac-sha1-caam",
1591                 .blocksize = SHA1_BLOCK_SIZE,
1592                 .template_ahash = {
1593                         .init = ahash_init,
1594                         .update = ahash_update,
1595                         .final = ahash_final,
1596                         .finup = ahash_finup,
1597                         .digest = ahash_digest,
1598                         .export = ahash_export,
1599                         .import = ahash_import,
1600                         .setkey = ahash_setkey,
1601                         .halg = {
1602                                 .digestsize = SHA1_DIGEST_SIZE,
1603                                 .statesize = sizeof(struct caam_export_state),
1604                         },
1605                 },
1606                 .alg_type = OP_ALG_ALGSEL_SHA1,
1607         }, {
1608                 .name = "sha224",
1609                 .driver_name = "sha224-caam",
1610                 .hmac_name = "hmac(sha224)",
1611                 .hmac_driver_name = "hmac-sha224-caam",
1612                 .blocksize = SHA224_BLOCK_SIZE,
1613                 .template_ahash = {
1614                         .init = ahash_init,
1615                         .update = ahash_update,
1616                         .final = ahash_final,
1617                         .finup = ahash_finup,
1618                         .digest = ahash_digest,
1619                         .export = ahash_export,
1620                         .import = ahash_import,
1621                         .setkey = ahash_setkey,
1622                         .halg = {
1623                                 .digestsize = SHA224_DIGEST_SIZE,
1624                                 .statesize = sizeof(struct caam_export_state),
1625                         },
1626                 },
1627                 .alg_type = OP_ALG_ALGSEL_SHA224,
1628         }, {
1629                 .name = "sha256",
1630                 .driver_name = "sha256-caam",
1631                 .hmac_name = "hmac(sha256)",
1632                 .hmac_driver_name = "hmac-sha256-caam",
1633                 .blocksize = SHA256_BLOCK_SIZE,
1634                 .template_ahash = {
1635                         .init = ahash_init,
1636                         .update = ahash_update,
1637                         .final = ahash_final,
1638                         .finup = ahash_finup,
1639                         .digest = ahash_digest,
1640                         .export = ahash_export,
1641                         .import = ahash_import,
1642                         .setkey = ahash_setkey,
1643                         .halg = {
1644                                 .digestsize = SHA256_DIGEST_SIZE,
1645                                 .statesize = sizeof(struct caam_export_state),
1646                         },
1647                 },
1648                 .alg_type = OP_ALG_ALGSEL_SHA256,
1649         }, {
1650                 .name = "sha384",
1651                 .driver_name = "sha384-caam",
1652                 .hmac_name = "hmac(sha384)",
1653                 .hmac_driver_name = "hmac-sha384-caam",
1654                 .blocksize = SHA384_BLOCK_SIZE,
1655                 .template_ahash = {
1656                         .init = ahash_init,
1657                         .update = ahash_update,
1658                         .final = ahash_final,
1659                         .finup = ahash_finup,
1660                         .digest = ahash_digest,
1661                         .export = ahash_export,
1662                         .import = ahash_import,
1663                         .setkey = ahash_setkey,
1664                         .halg = {
1665                                 .digestsize = SHA384_DIGEST_SIZE,
1666                                 .statesize = sizeof(struct caam_export_state),
1667                         },
1668                 },
1669                 .alg_type = OP_ALG_ALGSEL_SHA384,
1670         }, {
1671                 .name = "sha512",
1672                 .driver_name = "sha512-caam",
1673                 .hmac_name = "hmac(sha512)",
1674                 .hmac_driver_name = "hmac-sha512-caam",
1675                 .blocksize = SHA512_BLOCK_SIZE,
1676                 .template_ahash = {
1677                         .init = ahash_init,
1678                         .update = ahash_update,
1679                         .final = ahash_final,
1680                         .finup = ahash_finup,
1681                         .digest = ahash_digest,
1682                         .export = ahash_export,
1683                         .import = ahash_import,
1684                         .setkey = ahash_setkey,
1685                         .halg = {
1686                                 .digestsize = SHA512_DIGEST_SIZE,
1687                                 .statesize = sizeof(struct caam_export_state),
1688                         },
1689                 },
1690                 .alg_type = OP_ALG_ALGSEL_SHA512,
1691         }, {
1692                 .name = "md5",
1693                 .driver_name = "md5-caam",
1694                 .hmac_name = "hmac(md5)",
1695                 .hmac_driver_name = "hmac-md5-caam",
1696                 .blocksize = MD5_BLOCK_WORDS * 4,
1697                 .template_ahash = {
1698                         .init = ahash_init,
1699                         .update = ahash_update,
1700                         .final = ahash_final,
1701                         .finup = ahash_finup,
1702                         .digest = ahash_digest,
1703                         .export = ahash_export,
1704                         .import = ahash_import,
1705                         .setkey = ahash_setkey,
1706                         .halg = {
1707                                 .digestsize = MD5_DIGEST_SIZE,
1708                                 .statesize = sizeof(struct caam_export_state),
1709                         },
1710                 },
1711                 .alg_type = OP_ALG_ALGSEL_MD5,
1712         }, {
1713                 .hmac_name = "xcbc(aes)",
1714                 .hmac_driver_name = "xcbc-aes-caam",
1715                 .blocksize = AES_BLOCK_SIZE,
1716                 .template_ahash = {
1717                         .init = ahash_init,
1718                         .update = ahash_update,
1719                         .final = ahash_final,
1720                         .finup = ahash_finup,
1721                         .digest = ahash_digest,
1722                         .export = ahash_export,
1723                         .import = ahash_import,
1724                         .setkey = axcbc_setkey,
1725                         .halg = {
1726                                 .digestsize = AES_BLOCK_SIZE,
1727                                 .statesize = sizeof(struct caam_export_state),
1728                         },
1729                  },
1730                 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
1731         }, {
1732                 .hmac_name = "cmac(aes)",
1733                 .hmac_driver_name = "cmac-aes-caam",
1734                 .blocksize = AES_BLOCK_SIZE,
1735                 .template_ahash = {
1736                         .init = ahash_init,
1737                         .update = ahash_update,
1738                         .final = ahash_final,
1739                         .finup = ahash_finup,
1740                         .digest = ahash_digest,
1741                         .export = ahash_export,
1742                         .import = ahash_import,
1743                         .setkey = acmac_setkey,
1744                         .halg = {
1745                                 .digestsize = AES_BLOCK_SIZE,
1746                                 .statesize = sizeof(struct caam_export_state),
1747                         },
1748                  },
1749                 .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
1750         },
1751 };
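/*
 * Editorial note: the two AES entries above provide only hmac_name:
 * xcbc(aes) and cmac(aes) exist solely in keyed form, which is why
 * caam_algapi_hash_init() skips their unkeyed registration.
 */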
1752
1753 struct caam_hash_alg {
1754         struct list_head entry;
1755         int alg_type;
1756         struct ahash_alg ahash_alg;
1757 };
1758
1759 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1760 {
1761         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1762         struct crypto_alg *base = tfm->__crt_alg;
1763         struct hash_alg_common *halg =
1764                  container_of(base, struct hash_alg_common, base);
1765         struct ahash_alg *alg =
1766                  container_of(halg, struct ahash_alg, halg);
1767         struct caam_hash_alg *caam_hash =
1768                  container_of(alg, struct caam_hash_alg, ahash_alg);
1769         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1770         /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
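        /* (SHA-224 and SHA-384 keep the full 32- and 64-byte running state
         * of SHA-256 and SHA-512, hence the bare 32 and 64 below) */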
1771         static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1772                                          HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1773                                          HASH_MSG_LEN + 32,
1774                                          HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1775                                          HASH_MSG_LEN + 64,
1776                                          HASH_MSG_LEN + SHA512_DIGEST_SIZE };
1777         dma_addr_t dma_addr;
1778         struct caam_drv_private *priv;
1779
1780         /*
1781          * Get a job ring from the Job Ring driver to ensure in-order
1782          * processing of crypto requests per tfm.
1783          */
1784         ctx->jrdev = caam_jr_alloc();
1785         if (IS_ERR(ctx->jrdev)) {
1786                 pr_err("Job Ring Device allocation for transform failed\n");
1787                 return PTR_ERR(ctx->jrdev);
1788         }
1789
1790         priv = dev_get_drvdata(ctx->jrdev->parent);
1791
1792         if (is_xcbc_aes(caam_hash->alg_type)) {
1793                 ctx->dir = DMA_TO_DEVICE;
1794                 ctx->key_dir = DMA_BIDIRECTIONAL;
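                /* editorial note: mapped bidirectionally, presumably because
                 * the hardware-derived XCBC subkeys are written back into
                 * ctx->key */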
1795                 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1796                 ctx->ctx_len = 48;
1797         } else if (is_cmac_aes(caam_hash->alg_type)) {
1798                 ctx->dir = DMA_TO_DEVICE;
1799                 ctx->key_dir = DMA_NONE;
1800                 ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1801                 ctx->ctx_len = 32;
1802         } else {
1803                 if (priv->era >= 6) {
1804                         ctx->dir = DMA_BIDIRECTIONAL;
1805                         ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
1806                 } else {
1807                         ctx->dir = DMA_TO_DEVICE;
1808                         ctx->key_dir = DMA_NONE;
1809                 }
1810                 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1811                 ctx->ctx_len = runninglen[(ctx->adata.algtype &
1812                                            OP_ALG_ALGSEL_SUBMASK) >>
1813                                           OP_ALG_ALGSEL_SHIFT];
1814         }
1815
1816         if (ctx->key_dir != DMA_NONE) {
1817                 ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1818                                                           ARRAY_SIZE(ctx->key),
1819                                                           ctx->key_dir,
1820                                                           DMA_ATTR_SKIP_CPU_SYNC);
1821                 if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
1822                         dev_err(ctx->jrdev, "unable to map key\n");
1823                         caam_jr_free(ctx->jrdev);
1824                         return -ENOMEM;
1825                 }
1826         }
1827
1828         dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
1829                                         offsetof(struct caam_hash_ctx, key),
1830                                         ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1831         if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1832                 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1833
1834                 if (ctx->key_dir != DMA_NONE)
1835                         dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1836                                                ARRAY_SIZE(ctx->key),
1837                                                ctx->key_dir,
1838                                                DMA_ATTR_SKIP_CPU_SYNC);
1839
1840                 caam_jr_free(ctx->jrdev);
1841                 return -ENOMEM;
1842         }
1843
1844         ctx->sh_desc_update_dma = dma_addr;
1845         ctx->sh_desc_update_first_dma = dma_addr +
1846                                         offsetof(struct caam_hash_ctx,
1847                                                  sh_desc_update_first);
1848         ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
1849                                                    sh_desc_fin);
1850         ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
1851                                                       sh_desc_digest);
1852
1853         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1854                                  sizeof(struct caam_hash_state));
1855
1856         /*
1857          * For keyed hash algorithms, the shared descriptors
1858          * are created later, in the setkey() callback.
1859          */
1860         return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
1861 }
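/*
 * Editorial sketch (not part of the driver): per the comment above, keyed
 * transforms build their shared descriptors in setkey(), so a key must be
 * set before hashing.  "key"/"keylen" are placeholders, errors elided.
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *      crypto_ahash_setkey(tfm, key, keylen);    // reaches ahash_setkey()
 *      ...                                       // then init/update/final
 */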
1862
1863 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
1864 {
1865         struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1866
1867         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1868                                offsetof(struct caam_hash_ctx, key),
1869                                ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1870         if (ctx->key_dir != DMA_NONE)
1871                 dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
1872                                        ARRAY_SIZE(ctx->key), ctx->key_dir,
1873                                        DMA_ATTR_SKIP_CPU_SYNC);
1874         caam_jr_free(ctx->jrdev);
1875 }
1876
1877 void caam_algapi_hash_exit(void)
1878 {
1879         struct caam_hash_alg *t_alg, *n;
1880
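        /*
         * hash_list is initialized only by caam_algapi_hash_init(); when
         * the MD block is absent, init returns before INIT_LIST_HEAD(),
         * leaving next NULL, so there is nothing to unregister.
         */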
1881         if (!hash_list.next)
1882                 return;
1883
1884         list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
1885                 crypto_unregister_ahash(&t_alg->ahash_alg);
1886                 list_del(&t_alg->entry);
1887                 kfree(t_alg);
1888         }
1889 }
1890
1891 static struct caam_hash_alg *
1892 caam_hash_alloc(struct caam_hash_template *template,
1893                 bool keyed)
1894 {
1895         struct caam_hash_alg *t_alg;
1896         struct ahash_alg *halg;
1897         struct crypto_alg *alg;
1898
1899         t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
1900         if (!t_alg) {
1901                 pr_err("failed to allocate t_alg\n");
1902                 return ERR_PTR(-ENOMEM);
1903         }
1904
1905         t_alg->ahash_alg = template->template_ahash;
1906         halg = &t_alg->ahash_alg;
1907         alg = &halg->halg.base;
1908
1909         if (keyed) {
1910                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1911                          template->hmac_name);
1912                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1913                          template->hmac_driver_name);
1914         } else {
1915                 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1916                          template->name);
1917                 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1918                          template->driver_name);
1919                 t_alg->ahash_alg.setkey = NULL;
1920         }
1921         alg->cra_module = THIS_MODULE;
1922         alg->cra_init = caam_hash_cra_init;
1923         alg->cra_exit = caam_hash_cra_exit;
1924         alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
1925         alg->cra_priority = CAAM_CRA_PRIORITY;
1926         alg->cra_blocksize = template->blocksize;
1927         alg->cra_alignmask = 0;
1928         alg->cra_flags = CRYPTO_ALG_ASYNC;
1929
1930         t_alg->alg_type = template->alg_type;
1931
1932         return t_alg;
1933 }
1934
1935 int caam_algapi_hash_init(struct device *ctrldev)
1936 {
1937         int i = 0, err = 0;
1938         struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
1939         unsigned int md_limit = SHA512_DIGEST_SIZE;
1940         u32 md_inst, md_vid;
1941
1942         /*
1943          * Register the crypto algorithms the device supports.  First,
1944          * identify the presence and attributes of the MD block.
1945          */
1946         if (priv->era < 10) {
1947                 md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
1948                           CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1949                 md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
1950                            CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
1951         } else {
1952                 u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
1953
1954                 md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
1955                 md_inst = mdha & CHA_VER_NUM_MASK;
1956         }
1957
1958         /*
1959          * Skip registration of any hashing algorithms if the MD block
1960          * is not present.
1961          */
1962         if (!md_inst)
1963                 return 0;
1964
1965         /* The low-power LP256 MD block only supports digests up to SHA-256 */
1966         if (md_vid == CHA_VER_VID_MD_LP256)
1967                 md_limit = SHA256_DIGEST_SIZE;
1968
1969         INIT_LIST_HEAD(&hash_list);
1970
1971         /* register crypto algorithms the device supports */
1972         for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
1973                 struct caam_hash_alg *t_alg;
1974                 struct caam_hash_template *alg = driver_hash + i;
1975
1976                 /* If the MD size is not supported by the device, skip registration */
1977                 if (is_mdha(alg->alg_type) &&
1978                     alg->template_ahash.halg.digestsize > md_limit)
1979                         continue;
1980
1981                 /* register hmac version */
1982                 t_alg = caam_hash_alloc(alg, true);
1983                 if (IS_ERR(t_alg)) {
1984                         err = PTR_ERR(t_alg);
1985                         pr_warn("%s alg allocation failed\n",
1986                                 alg->hmac_driver_name);
1987                         continue;
1988                 }
1989
1990                 err = crypto_register_ahash(&t_alg->ahash_alg);
1991                 if (err) {
1992                         pr_warn("%s alg registration failed: %d\n",
1993                                 t_alg->ahash_alg.halg.base.cra_driver_name,
1994                                 err);
1995                         kfree(t_alg);
1996                 } else
1997                         list_add_tail(&t_alg->entry, &hash_list);
1998
1999                 if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2000                         continue;
2001
2002                 /* register unkeyed version */
2003                 t_alg = caam_hash_alloc(alg, false);
2004                 if (IS_ERR(t_alg)) {
2005                         err = PTR_ERR(t_alg);
2006                         pr_warn("%s alg allocation failed\n", alg->driver_name);
2007                         continue;
2008                 }
2009
2010                 err = crypto_register_ahash(&t_alg->ahash_alg);
2011                 if (err) {
2012                         pr_warn("%s alg registration failed: %d\n",
2013                                 t_alg->ahash_alg.halg.base.cra_driver_name,
2014                                 err);
2015                         kfree(t_alg);
2016                 } else
2017                         list_add_tail(&t_alg->entry, &hash_list);
2018         }
2019
2020         return err;
2021 }