// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, as well as the input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;
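
/*
 * The zero buffer is sized CAAM_RSA_MAX_INPUT_SIZE - 1 bytes (see
 * caam_pkc_init() and caam_rsa_init_tfm()): rsa_edesc_alloc() prepends
 * n_sz - src_len padding bytes, which for a non-empty input and a 4096-bit
 * (512-byte) modulus is at most 511 bytes.
 */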

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
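
/*
 * Note that tmp1 and tmp2 are per-key scratch buffers of p_sz and q_sz
 * bytes; they are mapped DMA_BIDIRECTIONAL because the accelerator may
 * both read and write them while performing the CRT private-key
 * computation.
 */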

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;
	int ecode = 0;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, ecode);
}
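
/*
 * All four completion callbacks above run from the Job Ring's completion
 * path, so they are limited to unmapping DMA resources, freeing the
 * extended descriptor and signalling completion of the request.
 */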

/*
 * Count the leading zero bytes, to be stripped, from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zeros, in bytes, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
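
/*
 * For example, a scatterlist holding { 0x00, 0x00, 0x07, ... } with
 * nbytes == 4 yields 2: only the two leading zero bytes count, and never
 * more than nbytes of them are stripped.
 */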

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}
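
/*
 * Layout of the single allocation returned above:
 *
 *	+------------------------+ <- edesc
 *	| struct rsa_edesc       |
 *	+------------------------+ <- (void *)edesc + sizeof(*edesc)
 *	| hw job descriptor      | desclen bytes
 *	+------------------------+ <- edesc->sec4_sg
 *	| sec4 S/G link table    | sec4_sg_bytes bytes
 *	+------------------------+
 */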

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}
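
/*
 * In the RSA PDBs, f denotes the plaintext-side buffer and g the
 * ciphertext-side one: encryption above reads f and writes g, while the
 * private-key (decryption) forms below read g and write f, which is why
 * the source/destination roles of f_dma and g_dma are swapped relative to
 * set_rsa_pub_pdb().
 */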

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}
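
/*
 * The CAAM RSA decrypt protocol accepts three private-key representations:
 * form 1 is (n, d), form 2 adds the prime factors (p, q) and form 3 is the
 * CRT set (p, q, dP, dQ, qInv). caam_rsa_set_priv_key_form() records the
 * richest form the supplied key material allows, letting decryption take
 * the faster CRT path whenever possible.
 */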

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv may decode to fewer bytes than the corresponding p, q
 * length, as BER encoding requires that the minimum number of bytes be used
 * to encode the integer. The decoded dP, dQ, qInv values therefore have to
 * be zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}
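
/*
 * For example, buf = { 0x00, 0x00, 0x01, 0x23 } with *nbytes == 4 skips
 * the two leading zero bytes, updates *nbytes to 2 and returns a copy of
 * { 0x01, 0x23 }.
 */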

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per-session pkc driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per-session pkc driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	},
};
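
/*
 * A minimal usage sketch (an illustration, not part of this driver):
 * kernel users reach this implementation through the generic akcipher API,
 * e.g.:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		// set_pub_key()/encrypt() etc. now dispatch to the
 *		// caam_rsa_* handlers registered above
 *		crypto_free_akcipher(tfm);
 *	}
 *
 * With cra_priority = 3000, this implementation is preferred over the
 * generic software "rsa" whenever the CAAM PKHA is present.
 */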

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}