// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
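/*
 * Two paths to the device: session create/destroy requests go over
 * the control virtqueue and complete synchronously (the caller
 * busy-waits), while encrypt/decrypt requests go over a data
 * virtqueue and complete asynchronously through
 * virtio_crypto_dataq_sym_callback().
 */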
struct virtio_crypto_ablkcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};
struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct crypto_alg algo;
};
/*
 * The algs_lock protects the below global virtio_crypto_algs table,
 * including each entry's active_devs count, during crypto algorithm
 * registration and unregistration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);
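/*
 * Completion callback invoked on the data-virtqueue path: map the
 * device's status code onto a kernel errno and hand the finished
 * request back to the crypto engine.
 */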
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
						      ablk_req, error);
	}
}
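/* Sum the byte lengths of all entries in a scatterlist. */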
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n",
		       key_len);
		return -EINVAL;
	}
	return 0;
}
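/*
 * Create one cipher session over the control virtqueue.  The request
 * uses three scatterlist entries: the control header (out), the raw
 * key (out) and a device-written result (in).  The control queue is
 * synchronous, so we busy-wait until the host consumes the buffer.
 */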
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * copy of the key instead.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	spin_lock(&vcrypto->ctrl_lock);
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Fill in the cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);
	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);
	/*
	 * The kick traps into the hypervisor, so the request should be
	 * handled immediately; spin until the device returns the buffer.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();
	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}
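/*
 * Destroy one previously-created session, mirroring the create path
 * above: control header out, status in, then busy-wait on the
 * control virtqueue for completion.
 */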
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Fill in the ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		    sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();
	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			vcrypto->ctrl_status.status,
			destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}
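/*
 * The device treats encryption and decryption as separate sessions,
 * so one setkey creates both; if the decrypt session cannot be
 * created, the already-created encrypt session is closed again.
 */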
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
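/*
 * On the first setkey the tfm is bound to a virtio crypto device
 * (looked up by NUMA node and advertised cipher support); on rekey
 * the previously-created sessions are closed before new ones are
 * made.
 */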
/* Note: kernel crypto API implementation */
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const uint8_t *key,
					   unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				virtcrypto_get_dev_node(node,
					VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously-created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
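/*
 * Build and submit one data-virtqueue request.  Descriptor layout:
 * op header (out), IV (out), source data (out), destination data
 * (in) and a device-written status (in); hence the "+ 3" when
 * sizing the sg pointer array below.
 */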
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3? outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}
	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);
	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/*
	 * IV: avoid DMA from the stack, use a dynamically-allocated
	 * copy instead.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;
	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}
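/*
 * Encrypt/decrypt entry points only record per-request state and
 * queue the request on the crypto engine; the engine later calls
 * virtio_crypto_ablkcipher_crypt_req() to do the actual submission.
 */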
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_ablkcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_ablkcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct ablkcipher_request *req = container_of(vreq, struct ablkcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "virtio_crypto_aes_cbc",
		.cra_priority = 150,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct virtio_crypto_ablkcipher_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_type = &crypto_ablkcipher_type,
		.cra_init = virtio_crypto_ablkcipher_init,
		.cra_exit = virtio_crypto_ablkcipher_exit,
		.cra_u = {
			.ablkcipher = {
				.setkey = virtio_crypto_ablkcipher_setkey,
				.decrypt = virtio_crypto_ablkcipher_decrypt,
				.encrypt = virtio_crypto_ablkcipher_encrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
		},
	},
} };
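/*
 * Algorithm registration is reference-counted across devices: an
 * algorithm is registered with the crypto API when the first capable
 * device appears and unregistered when the last one goes away.
 */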
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_alg(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}