// SPDX-License-Identifier: GPL-2.0-only
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"
#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);
struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

struct spu_qreg {
	struct spu_queue	*queue;
	unsigned long		type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;
static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}
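/* Illustrative wraparound example (the entry size and count used for
 * these numbers are assumptions for illustration; the real values come
 * from n2_core.h): with 64-byte entries in a 64-entry CWQ ring, an
 * offset of 0x0 advances to 0x40, while an offset of 0xfc0 wraps back
 * around to 0x0.
 */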
struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)
/* An async job request records the final tail value it used in
 * n2_request_common->offset, test to see if that offset is in
 * the range old_head, new_head, inclusive.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
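/* Worked example (illustrative offsets only): if the queue has wrapped
 * so that old_head = 0xf80 and new_head = 0x80, then a job that
 * recorded offset 0xfc0 (past old_head) or 0x40 (at or before
 * new_head) has completed, while a job at offset 0xf00 has not been
 * reached yet.
 */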
/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the interrupt.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... todo ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}
static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}
static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}
static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
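/* Worked example (illustrative, again assuming 64-byte entries in a
 * 4096-byte ring): head = 0x100 and tail = 0x40 gives
 * (0x100 - 0x40) / 64 - 1 = 2 free entries; with head = 0x40 and
 * tail = 0x100 the ring has wrapped, so
 * (0x1000 - 0x100 + 0x40) / 64 - 1 = 60 entries are free.  One slot
 * is always held back so that tail never catches up to head.
 */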
static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}
static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
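/* Example invocation (a sketch of what n2_do_async_digest below builds
 * for the first descriptor of a plain SHA-1 hash; the constants come
 * from n2_core.h and <crypto/sha.h>): the length and hash-size fields
 * are stored biased by one, start-of-block is set, and end-of-block is
 * deferred until the final descriptor of the operation:
 *
 *	ent->control = control_word_base(nbytes, 0, 0, AUTH_TYPE_SHA1,
 *					 SHA1_DIGEST_SIZE,
 *					 false, true, false, false,
 *					 OPCODE_INPLACE_BIT |
 *					 OPCODE_AUTH_MAC);
 */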
#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif
struct n2_ahash_alg {
	struct list_head	entry;
	const u8		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}
struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}
struct n2_hash_ctx {
	struct crypto_ahash		*fallback_tfm;
};

#define N2_HASH_KEY_MAX			32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx		base;

	struct crypto_shash		*child_shash;

	int				hash_key_len;
	unsigned char			hash_key[N2_HASH_KEY_MAX];
};
struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request		fallback_req;
};
static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}
static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warn("Child shash '%s' could not be loaded!\n",
			n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}
static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	SHASH_DESC_ON_STACK(shash, child_shash);
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	shash->tfm = child_shash;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}
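/* Worked example of the RFC 2104 key preprocessing above (sizes are
 * those of hmac(sha256); other child hashes differ): the block size
 * bs is 64 and the digest size ds is 32, so a 100-byte key is first
 * digested down to a 32-byte ctx->hash_key, while a 20-byte key is
 * copied into ctx->hash_key verbatim.
 */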
static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}
static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}
static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}
static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}
struct n2_skcipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct skcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};
/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */
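/* A minimal sketch of the rule above (not part of the driver; the
 * helper name is ours, kept disabled for illustration only): a
 * sequence of descriptor lengths is valid iff every entry covers at
 * least one full cipher block.
 */
#if 0
static bool descriptor_lens_valid(const unsigned int *lens, int n,
				  unsigned int block_size)
{
	int i;

	/* e.g. {0xe, 0xa, 0x8} passes for an 8-byte block size, while
	 * {0xe, 0xb, 0x7} fails on the final 0x7 entry.
	 */
	for (i = 0; i < n; i++) {
		if (lens[i] < block_size)
			return false;
	}
	return true;
}
#endif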
struct n2_skcipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct skcipher_alg	skcipher;
};

static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	return container_of(alg, struct n2_skcipher_alg, skcipher);
}

struct n2_skcipher_request_context {
	struct skcipher_walk	walk;
};
static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}
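/* The resulting enc_type combines the chaining mode taken from the
 * algorithm template with the key-size-specific cipher code; e.g.
 * (illustratively) cbc(aes) with a 32-byte key yields
 * ENC_TYPE_CHAINING_CBC | ENC_TYPE_ALG_AES256.
 */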
static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	int err;

	err = verify_skcipher_des3_key(skcipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}
static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
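/* Worked examples: with a 16-byte block size, 100 pending bytes are
 * trimmed to 96 (the largest block-size multiple), and very long
 * walks are clamped to the hardware's 2^16-byte descriptor limit,
 * so 100000 pending bytes yield 65536.
 */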
static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
			    struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}
static int n2_compute_chunks(struct skcipher_request *req)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct skcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	err = skcipher_walk_async(walk, req);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);
	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.phys.page) +
			     walk->src.phys.offset);
		dest_paddr = (page_to_phys(walk->dst.phys.page) +
			      walk->dst.phys.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = skcipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = skcipher_walk_done(walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}
static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct skcipher_request *req)
{
	return n2_do_ecb(req, false);
}
static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct skcipher_request *req)
{
	return n2_do_chaining(req, false);
}
struct n2_skcipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct skcipher_alg	skcipher;
};
static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{	.name		= "ecb(arc4)",
		.drv_name	= "ecb-arc4",
		.block_size	= 1,
		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= 1,
			.max_keysize	= 256,
			.setkey		= n2_arc4_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},

	/* DES: ECB CBC and CFB are supported */
	{	.name		= "ecb(des)",
		.drv_name	= "ecb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des)",
		.drv_name	= "cbc-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CBC),
		.skcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des)",
		.drv_name	= "cfb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CFB),
		.skcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* 3DES: ECB CBC and CFB are supported */
	{	.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CBC),
		.skcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des3_ede)",
		.drv_name	= "cfb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CFB),
		.skcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* AES: ECB CBC and CTR are supported */
	{	.name		= "ecb(aes)",
		.drv_name	= "ecb-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_ECB),
		.skcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(aes)",
		.drv_name	= "cbc-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_CBC),
		.skcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "ctr(aes)",
		.drv_name	= "ctr-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_COUNTER),
		.skcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_encrypt_chaining,
		},
	},
};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)

static LIST_HEAD(skcipher_algs);

struct n2_hash_tmpl {
	const char	*name;
	const u8	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};
static const u32 n2_md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};
static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero_message_hash,
	  .hash_init	= n2_md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero_message_hash,
	  .hash_init	= n2_sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero_message_hash,
	  .hash_init	= n2_sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero_message_hash,
	  .hash_init	= n2_sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;
static void __n2_unregister_algs(void)
{
	struct n2_skcipher_alg *skcipher, *skcipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&skcipher->skcipher);
		list_del(&skcipher->entry);
		kfree(skcipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}
static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));

	return 0;
}

static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
	struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct skcipher_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->skcipher;
	*alg = tmpl->skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->base.cra_priority = N2_CRA_PRIORITY;
	alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
	alg->base.cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
	alg->base.cra_module = THIS_MODULE;
	alg->init = n2_skcipher_init_tfm;

	list_add(&p->entry, &skcipher_algs);
	err = crypto_register_skcipher(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->base.cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->base.cra_name);
	}
	return err;
}
static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}
static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;
	ahash->export = n2_hash_async_noexport;
	ahash->import = n2_hash_async_noimport;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}
static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}
/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}
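/* Illustrative example (made-up values): if the MDESC 'ino' table is
 * { 4, 5 } -- get_irq_props() below pairs ino 4 with intr 1 and ino 5
 * with intr 2 -- and the OF 'interrupts' property reads <1 2>, then
 * dev_ino 5 resolves to intr 2 at index 1, so the caller ends up using
 * dev->archdata.irqs[1] as the Linux IRQ.
 */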
static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}
static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}

static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
	struct spu_qreg qr = { .queue = p, .type = q_type };

	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}
static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}
/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
				dev->dev.of_node);
			return -EINVAL;
		}
		cpumask_set_cpu(*id, &p->sharing);
		table[*id] = p;
	}
	return 0;
}
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	cpumask_clear(&p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}
static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}
static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		printk("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}
static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}
static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}
static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}
static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}
static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}
static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}
static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);
static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		=	"n2cp",
		.of_match_table	=	n2_crypto_match,
	},
	.probe		=	n2_crypto_probe,
	.remove		=	n2_crypto_remove,
};
static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);
static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		=	"ncp",
		.of_match_table	=	n2_mau_match,
	},
	.probe		=	n2_mau_probe,
	.remove		=	n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);