1 // SPDX-License-Identifier: GPL-2.0-only
2 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
3  *
4  * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
5  */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_device.h>
13 #include <linux/cpumask.h>
14 #include <linux/slab.h>
15 #include <linux/interrupt.h>
16 #include <linux/crypto.h>
17 #include <crypto/md5.h>
18 #include <crypto/sha.h>
19 #include <crypto/aes.h>
20 #include <crypto/internal/des.h>
21 #include <linux/mutex.h>
22 #include <linux/delay.h>
23 #include <linux/sched.h>
24
25 #include <crypto/internal/hash.h>
26 #include <crypto/internal/skcipher.h>
27 #include <crypto/scatterwalk.h>
28 #include <crypto/algapi.h>
29
30 #include <asm/hypervisor.h>
31 #include <asm/mdesc.h>
32
33 #include "n2_core.h"
34
35 #define DRV_MODULE_NAME         "n2_crypto"
36 #define DRV_MODULE_VERSION      "0.2"
37 #define DRV_MODULE_RELDATE      "July 28, 2011"
38
39 static const char version[] =
40         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
41
42 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
43 MODULE_DESCRIPTION("Niagara2 Crypto driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_MODULE_VERSION);
46
47 #define N2_CRA_PRIORITY         200
48
49 static DEFINE_MUTEX(spu_lock);
50
51 struct spu_queue {
52         cpumask_t               sharing;
53         unsigned long           qhandle;
54
55         spinlock_t              lock;
56         u8                      q_type;
57         void                    *q;
58         unsigned long           head;
59         unsigned long           tail;
60         struct list_head        jobs;
61
62         unsigned long           devino;
63
64         char                    irq_name[32];
65         unsigned int            irq;
66
67         struct list_head        list;
68 };
69
70 struct spu_qreg {
71         struct spu_queue        *queue;
72         unsigned long           type;
73 };
74
75 static struct spu_queue **cpu_to_cwq;
76 static struct spu_queue **cpu_to_mau;
77
78 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
79 {
80         if (q->q_type == HV_NCS_QTYPE_MAU) {
81                 off += MAU_ENTRY_SIZE;
82                 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
83                         off = 0;
84         } else {
85                 off += CWQ_ENTRY_SIZE;
86                 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
87                         off = 0;
88         }
89         return off;
90 }
91
92 struct n2_request_common {
93         struct list_head        entry;
94         unsigned int            offset;
95 };
96 #define OFFSET_NOT_RUNNING      (~(unsigned int)0)
97
98 /* An async job request records the final tail value it used in
99  * n2_request_common->offset.  Test whether that offset lies in the
100  * range (old_head, new_head], i.e. whether the job has completed.
101  */
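/* Worked example (illustrative): with old_head = 0xf0 and new_head = 0x10
 * the hardware has wrapped past the end of the queue, so a job whose
 * recorded offset is 0xf8 or 0x08 has finished, while one at 0x20 has not.
 */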
102 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
103                                 unsigned long old_head, unsigned long new_head)
104 {
105         if (old_head <= new_head) {
106                 if (offset > old_head && offset <= new_head)
107                         return true;
108         } else {
109                 if (offset > old_head || offset <= new_head)
110                         return true;
111         }
112         return false;
113 }
114
115 /* When the HEAD marker is unequal to the actual HEAD, we get
116  * a virtual device INO interrupt.  We should process the
117  * completed CWQ entries and adjust the HEAD marker to clear
118  * the IRQ.
119  */
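/* Note: the completion loop below is still a stub (the XXX body); the
 * driver as written waits for job completion synchronously via
 * wait_for_tail() at submit time, so this handler only advances the
 * HEAD marker to quiesce the interrupt.
 */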
120 static irqreturn_t cwq_intr(int irq, void *dev_id)
121 {
122         unsigned long off, new_head, hv_ret;
123         struct spu_queue *q = dev_id;
124
125         pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
126                smp_processor_id(), q->qhandle);
127
128         spin_lock(&q->lock);
129
130         hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
131
132         pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
133                smp_processor_id(), new_head, hv_ret);
134
135         for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
136                 /* XXX ... XXX */
137         }
138
139         hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
140         if (hv_ret == HV_EOK)
141                 q->head = new_head;
142
143         spin_unlock(&q->lock);
144
145         return IRQ_HANDLED;
146 }
147
148 static irqreturn_t mau_intr(int irq, void *dev_id)
149 {
150         struct spu_queue *q = dev_id;
151         unsigned long head, hv_ret;
152
153         spin_lock(&q->lock);
154
155         pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
156                smp_processor_id(), q->qhandle);
157
158         hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
159
160         pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
161                smp_processor_id(), head, hv_ret);
162
163         sun4v_ncs_sethead_marker(q->qhandle, head);
164
165         spin_unlock(&q->lock);
166
167         return IRQ_HANDLED;
168 }
169
170 static void *spu_queue_next(struct spu_queue *q, void *cur)
171 {
172         return q->q + spu_next_offset(q, cur - q->q);
173 }
174
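/* Number of free entry slots in the CWQ ring.  One slot is left unused
 * (the "- 1" below), presumably so that a completely full queue stays
 * distinguishable from an empty one (head == tail means empty).
 */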
175 static int spu_queue_num_free(struct spu_queue *q)
176 {
177         unsigned long head = q->head;
178         unsigned long tail = q->tail;
179         unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
180         unsigned long diff;
181
182         if (head > tail)
183                 diff = head - tail;
184         else
185                 diff = (end - tail) + head;
186
187         return (diff / CWQ_ENTRY_SIZE) - 1;
188 }
189
190 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
191 {
192         int avail = spu_queue_num_free(q);
193
194         if (avail >= num_entries)
195                 return q->q + q->tail;
196
197         return NULL;
198 }
199
200 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
201 {
202         unsigned long hv_ret, new_tail;
203
204         new_tail = spu_next_offset(q, last - q->q);
205
206         hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
207         if (hv_ret == HV_EOK)
208                 q->tail = new_tail;
209         return hv_ret;
210 }
211
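/* Assemble the 64-bit control word for the first CWQ entry of an
 * operation: len counts the input bytes covered by this entry, hash_len
 * is the size of the authentication state, and the sob/eob flags mark
 * the start and end of a multi-descriptor request.
 */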
212 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
213                              int enc_type, int auth_type,
214                              unsigned int hash_len,
215                              bool sfas, bool sob, bool eob, bool encrypt,
216                              int opcode)
217 {
218         u64 word = (len - 1) & CONTROL_LEN;
219
220         word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
221         word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
222         word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
223         if (sfas)
224                 word |= CONTROL_STORE_FINAL_AUTH_STATE;
225         if (sob)
226                 word |= CONTROL_START_OF_BLOCK;
227         if (eob)
228                 word |= CONTROL_END_OF_BLOCK;
229         if (encrypt)
230                 word |= CONTROL_ENCRYPT;
231         if (hmac_key_len)
232                 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
233         if (hash_len)
234                 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
235
236         return word;
237 }
238
239 #if 0
240 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
241 {
242         if (this_len >= 64 ||
243             qp->head != qp->tail)
244                 return true;
245         return false;
246 }
247 #endif
248
249 struct n2_ahash_alg {
250         struct list_head        entry;
251         const u8                *hash_zero;
252         const u32               *hash_init;
253         u8                      hw_op_hashsz;
254         u8                      digest_size;
255         u8                      auth_type;
256         u8                      hmac_type;
257         struct ahash_alg        alg;
258 };
259
260 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
261 {
262         struct crypto_alg *alg = tfm->__crt_alg;
263         struct ahash_alg *ahash_alg;
264
265         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
266
267         return container_of(ahash_alg, struct n2_ahash_alg, alg);
268 }
269
270 struct n2_hmac_alg {
271         const char              *child_alg;
272         struct n2_ahash_alg     derived;
273 };
274
275 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
276 {
277         struct crypto_alg *alg = tfm->__crt_alg;
278         struct ahash_alg *ahash_alg;
279
280         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
281
282         return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
283 }
284
285 struct n2_hash_ctx {
286         struct crypto_ahash             *fallback_tfm;
287 };
288
289 #define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
290
291 struct n2_hmac_ctx {
292         struct n2_hash_ctx              base;
293
294         struct crypto_shash             *child_shash;
295
296         int                             hash_key_len;
297         unsigned char                   hash_key[N2_HASH_KEY_MAX];
298 };
299
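/* Per-request hash context: u holds the intermediate hash state that the
 * SPU operates on for one-shot digests, while fallback_req carries
 * everything the hardware path does not handle (init/update/final/finup
 * and requests larger than 2^16 bytes go to the software fallback).
 */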
300 struct n2_hash_req_ctx {
301         union {
302                 struct md5_state        md5;
303                 struct sha1_state       sha1;
304                 struct sha256_state     sha256;
305         } u;
306
307         struct ahash_request            fallback_req;
308 };
309
310 static int n2_hash_async_init(struct ahash_request *req)
311 {
312         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
313         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
314         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
315
316         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
317         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
318
319         return crypto_ahash_init(&rctx->fallback_req);
320 }
321
322 static int n2_hash_async_update(struct ahash_request *req)
323 {
324         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
325         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
326         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
327
328         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
329         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
330         rctx->fallback_req.nbytes = req->nbytes;
331         rctx->fallback_req.src = req->src;
332
333         return crypto_ahash_update(&rctx->fallback_req);
334 }
335
336 static int n2_hash_async_final(struct ahash_request *req)
337 {
338         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
339         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
340         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
341
342         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
343         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
344         rctx->fallback_req.result = req->result;
345
346         return crypto_ahash_final(&rctx->fallback_req);
347 }
348
349 static int n2_hash_async_finup(struct ahash_request *req)
350 {
351         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
352         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
353         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
354
355         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
356         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
357         rctx->fallback_req.nbytes = req->nbytes;
358         rctx->fallback_req.src = req->src;
359         rctx->fallback_req.result = req->result;
360
361         return crypto_ahash_finup(&rctx->fallback_req);
362 }
363
364 static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
365 {
366         return -ENOSYS;
367 }
368
369 static int n2_hash_async_noexport(struct ahash_request *req, void *out)
370 {
371         return -ENOSYS;
372 }
373
374 static int n2_hash_cra_init(struct crypto_tfm *tfm)
375 {
376         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
377         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
378         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
379         struct crypto_ahash *fallback_tfm;
380         int err;
381
382         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
383                                           CRYPTO_ALG_NEED_FALLBACK);
384         if (IS_ERR(fallback_tfm)) {
385                 pr_warn("Fallback driver '%s' could not be loaded!\n",
386                         fallback_driver_name);
387                 err = PTR_ERR(fallback_tfm);
388                 goto out;
389         }
390
391         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
392                                          crypto_ahash_reqsize(fallback_tfm)));
393
394         ctx->fallback_tfm = fallback_tfm;
395         return 0;
396
397 out:
398         return err;
399 }
400
401 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
402 {
403         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
404         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
405
406         crypto_free_ahash(ctx->fallback_tfm);
407 }
408
409 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
410 {
411         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
412         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
413         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
414         struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
415         struct crypto_ahash *fallback_tfm;
416         struct crypto_shash *child_shash;
417         int err;
418
419         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
420                                           CRYPTO_ALG_NEED_FALLBACK);
421         if (IS_ERR(fallback_tfm)) {
422                 pr_warn("Fallback driver '%s' could not be loaded!\n",
423                         fallback_driver_name);
424                 err = PTR_ERR(fallback_tfm);
425                 goto out;
426         }
427
428         child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
429         if (IS_ERR(child_shash)) {
430                 pr_warn("Child shash '%s' could not be loaded!\n",
431                         n2alg->child_alg);
432                 err = PTR_ERR(child_shash);
433                 goto out_free_fallback;
434         }
435
436         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
437                                          crypto_ahash_reqsize(fallback_tfm)));
438
439         ctx->child_shash = child_shash;
440         ctx->base.fallback_tfm = fallback_tfm;
441         return 0;
442
443 out_free_fallback:
444         crypto_free_ahash(fallback_tfm);
445
446 out:
447         return err;
448 }
449
450 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
451 {
452         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
453         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
454
455         crypto_free_ahash(ctx->base.fallback_tfm);
456         crypto_free_shash(ctx->child_shash);
457 }
458
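/* HMAC setkey: the raw key is always programmed into the software
 * fallback; for the hardware path a key longer than the block size is
 * first hashed down to the digest size with the child shash (standard
 * HMAC behaviour) and the result is cached in ctx->hash_key.  Keys that
 * still exceed N2_HASH_KEY_MAX cannot be cached and force the request
 * down the fallback path in n2_hmac_async_digest().
 */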
459 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
460                                 unsigned int keylen)
461 {
462         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
463         struct crypto_shash *child_shash = ctx->child_shash;
464         struct crypto_ahash *fallback_tfm;
465         SHASH_DESC_ON_STACK(shash, child_shash);
466         int err, bs, ds;
467
468         fallback_tfm = ctx->base.fallback_tfm;
469         err = crypto_ahash_setkey(fallback_tfm, key, keylen);
470         if (err)
471                 return err;
472
473         shash->tfm = child_shash;
474
475         bs = crypto_shash_blocksize(child_shash);
476         ds = crypto_shash_digestsize(child_shash);
477         BUG_ON(ds > N2_HASH_KEY_MAX);
478         if (keylen > bs) {
479                 err = crypto_shash_digest(shash, key, keylen,
480                                           ctx->hash_key);
481                 if (err)
482                         return err;
483                 keylen = ds;
484         } else if (keylen <= N2_HASH_KEY_MAX)
485                 memcpy(ctx->hash_key, key, keylen);
486
487         ctx->hash_key_len = keylen;
488
489         return err;
490 }
491
492 static unsigned long wait_for_tail(struct spu_queue *qp)
493 {
494         unsigned long head, hv_ret;
495
496         do {
497                 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
498                 if (hv_ret != HV_EOK) {
499                         pr_err("Hypervisor error on gethead\n");
500                         break;
501                 }
502                 if (head == qp->tail) {
503                         qp->head = head;
504                         break;
505                 }
506         } while (1);
507         return hv_ret;
508 }
509
510 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
511                                               struct cwq_initial_entry *ent)
512 {
513         unsigned long hv_ret = spu_queue_submit(qp, ent);
514
515         if (hv_ret == HV_EOK)
516                 hv_ret = wait_for_tail(qp);
517
518         return hv_ret;
519 }
520
521 static int n2_do_async_digest(struct ahash_request *req,
522                               unsigned int auth_type, unsigned int digest_size,
523                               unsigned int result_size, void *hash_loc,
524                               unsigned long auth_key, unsigned int auth_key_len)
525 {
526         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
527         struct cwq_initial_entry *ent;
528         struct crypto_hash_walk walk;
529         struct spu_queue *qp;
530         unsigned long flags;
531         int err = -ENODEV;
532         int nbytes, cpu;
533
534         /* The total effective length of the operation may not
535          * exceed 2^16.
536          */
537         if (unlikely(req->nbytes > (1 << 16))) {
538                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
539                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
540
541                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
542                 rctx->fallback_req.base.flags =
543                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
544                 rctx->fallback_req.nbytes = req->nbytes;
545                 rctx->fallback_req.src = req->src;
546                 rctx->fallback_req.result = req->result;
547
548                 return crypto_ahash_digest(&rctx->fallback_req);
549         }
550
551         nbytes = crypto_hash_walk_first(req, &walk);
552
553         cpu = get_cpu();
554         qp = cpu_to_cwq[cpu];
555         if (!qp)
556                 goto out;
557
558         spin_lock_irqsave(&qp->lock, flags);
559
560         /* XXX can do better, improve this later by doing a by-hand scatterlist
561          * XXX walk, etc.
562          */
563         ent = qp->q + qp->tail;
564
565         ent->control = control_word_base(nbytes, auth_key_len, 0,
566                                          auth_type, digest_size,
567                                          false, true, false, false,
568                                          OPCODE_INPLACE_BIT |
569                                          OPCODE_AUTH_MAC);
570         ent->src_addr = __pa(walk.data);
571         ent->auth_key_addr = auth_key;
572         ent->auth_iv_addr = __pa(hash_loc);
573         ent->final_auth_state_addr = 0UL;
574         ent->enc_key_addr = 0UL;
575         ent->enc_iv_addr = 0UL;
576         ent->dest_addr = __pa(hash_loc);
577
578         nbytes = crypto_hash_walk_done(&walk, 0);
579         while (nbytes > 0) {
580                 ent = spu_queue_next(qp, ent);
581
582                 ent->control = (nbytes - 1);
583                 ent->src_addr = __pa(walk.data);
584                 ent->auth_key_addr = 0UL;
585                 ent->auth_iv_addr = 0UL;
586                 ent->final_auth_state_addr = 0UL;
587                 ent->enc_key_addr = 0UL;
588                 ent->enc_iv_addr = 0UL;
589                 ent->dest_addr = 0UL;
590
591                 nbytes = crypto_hash_walk_done(&walk, 0);
592         }
593         ent->control |= CONTROL_END_OF_BLOCK;
594
595         if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
596                 err = -EINVAL;
597         else
598                 err = 0;
599
600         spin_unlock_irqrestore(&qp->lock, flags);
601
602         if (!err)
603                 memcpy(req->result, hash_loc, result_size);
604 out:
605         put_cpu();
606
607         return err;
608 }
609
610 static int n2_hash_async_digest(struct ahash_request *req)
611 {
612         struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
613         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
614         int ds;
615
616         ds = n2alg->digest_size;
617         if (unlikely(req->nbytes == 0)) {
618                 memcpy(req->result, n2alg->hash_zero, ds);
619                 return 0;
620         }
621         memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
622
623         return n2_do_async_digest(req, n2alg->auth_type,
624                                   n2alg->hw_op_hashsz, ds,
625                                   &rctx->u, 0UL, 0);
626 }
627
628 static int n2_hmac_async_digest(struct ahash_request *req)
629 {
630         struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
631         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
632         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
633         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
634         int ds;
635
636         ds = n2alg->derived.digest_size;
637         if (unlikely(req->nbytes == 0) ||
638             unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
639                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
640                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
641
642                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
643                 rctx->fallback_req.base.flags =
644                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
645                 rctx->fallback_req.nbytes = req->nbytes;
646                 rctx->fallback_req.src = req->src;
647                 rctx->fallback_req.result = req->result;
648
649                 return crypto_ahash_digest(&rctx->fallback_req);
650         }
651         memcpy(&rctx->u, n2alg->derived.hash_init,
652                n2alg->derived.hw_op_hashsz);
653
654         return n2_do_async_digest(req, n2alg->derived.hmac_type,
655                                   n2alg->derived.hw_op_hashsz, ds,
656                                   &rctx->u,
657                                   __pa(&ctx->hash_key),
658                                   ctx->hash_key_len);
659 }
660
661 struct n2_skcipher_context {
662         int                     key_len;
663         int                     enc_type;
664         union {
665                 u8              aes[AES_MAX_KEY_SIZE];
666                 u8              des[DES_KEY_SIZE];
667                 u8              des3[3 * DES_KEY_SIZE];
668                 u8              arc4[258]; /* S-box, X, Y */
669         } key;
670 };
671
672 #define N2_CHUNK_ARR_LEN        16
673
674 struct n2_crypto_chunk {
675         struct list_head        entry;
676         unsigned long           iv_paddr : 44;
677         unsigned long           arr_len : 20;
678         unsigned long           dest_paddr;
679         unsigned long           dest_final;
680         struct {
681                 unsigned long   src_paddr : 44;
682                 unsigned long   src_len : 20;
683         } arr[N2_CHUNK_ARR_LEN];
684 };
685
686 struct n2_request_context {
687         struct skcipher_walk    walk;
688         struct list_head        chunk_list;
689         struct n2_crypto_chunk  chunk;
690         u8                      temp_iv[16];
691 };
692
693 /* The SPU allows some level of flexibility for partial cipher blocks
694  * being specified in a descriptor.
695  *
696  * It merely requires that every descriptor's length field is at least
697  * as large as the cipher block size.  This means that a cipher block
698  * can span at most 2 descriptors.  However, this does not allow a
699  * partial block to span into the final descriptor as that would
700  * violate the rule (since every descriptor's length must be at lest
701  * violate the rule (since every descriptor's length must be at least
702  *
703  *      0xe --> 0xa --> 0x8
704  *
705  * is a valid length sequence, whereas:
706  *
707  *      0xe --> 0xb --> 0x7
708  *
709  * is not a valid sequence.
710  */
711
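/* Illustrative sketch only (not part of the driver): a helper that checks
 * a sequence of descriptor lengths against the rule described above, i.e.
 * every descriptor covers at least one full cipher block.  With an 8 byte
 * block size it accepts 0xe --> 0xa --> 0x8 and rejects 0xe --> 0xb --> 0x7.
 */
#if 0
static bool n2_descriptor_lens_valid(const unsigned int *lens, int n,
				     unsigned int block_size)
{
	int i;

	for (i = 0; i < n; i++) {
		if (lens[i] < block_size)
			return false;
	}
	return true;
}
#endif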
712 struct n2_skcipher_alg {
713         struct list_head        entry;
714         u8                      enc_type;
715         struct skcipher_alg     skcipher;
716 };
717
718 static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
719 {
720         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
721
722         return container_of(alg, struct n2_skcipher_alg, skcipher);
723 }
724
725 struct n2_skcipher_request_context {
726         struct skcipher_walk    walk;
727 };
728
729 static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
730                          unsigned int keylen)
731 {
732         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
733         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
734         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
735
736         ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
737
738         switch (keylen) {
739         case AES_KEYSIZE_128:
740                 ctx->enc_type |= ENC_TYPE_ALG_AES128;
741                 break;
742         case AES_KEYSIZE_192:
743                 ctx->enc_type |= ENC_TYPE_ALG_AES192;
744                 break;
745         case AES_KEYSIZE_256:
746                 ctx->enc_type |= ENC_TYPE_ALG_AES256;
747                 break;
748         default:
749                 crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
750                 return -EINVAL;
751         }
752
753         ctx->key_len = keylen;
754         memcpy(ctx->key.aes, key, keylen);
755         return 0;
756 }
757
758 static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
759                          unsigned int keylen)
760 {
761         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
762         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
763         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
764         int err;
765
766         err = verify_skcipher_des_key(skcipher, key);
767         if (err)
768                 return err;
769
770         ctx->enc_type = n2alg->enc_type;
771
772         ctx->key_len = keylen;
773         memcpy(ctx->key.des, key, keylen);
774         return 0;
775 }
776
777 static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
778                           unsigned int keylen)
779 {
780         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
781         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
782         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
783         int err;
784
785         err = verify_skcipher_des3_key(skcipher, key);
786         if (err)
787                 return err;
788
789         ctx->enc_type = n2alg->enc_type;
790
791         ctx->key_len = keylen;
792         memcpy(ctx->key.des3, key, keylen);
793         return 0;
794 }
795
796 static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key,
797                           unsigned int keylen)
798 {
799         struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
800         struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
801         struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
802         u8 *s = ctx->key.arc4;
803         u8 *x = s + 256;
804         u8 *y = x + 1;
805         int i, j, k;
806
807         ctx->enc_type = n2alg->enc_type;
808
809         j = k = 0;
810         *x = 0;
811         *y = 0;
812         for (i = 0; i < 256; i++)
813                 s[i] = i;
814         for (i = 0; i < 256; i++) {
815                 u8 a = s[i];
816                 j = (j + key[k] + a) & 0xff;
817                 s[i] = s[j];
818                 s[j] = a;
819                 if (++k >= keylen)
820                         k = 0;
821         }
822
823         return 0;
824 }
825
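/* Largest descriptor length to use for the next chunk entry: round nbytes
 * down to a whole number of cipher blocks and clamp the result to the
 * 2^16 byte limit.  Example: with an 8 byte block size, nbytes = 0x1007
 * yields 0x1000.
 */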
826 static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
827 {
828         int this_len = nbytes;
829
830         this_len -= (nbytes & (block_size - 1));
831         return this_len > (1 << 16) ? (1 << 16) : this_len;
832 }
833
834 static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
835                             struct n2_crypto_chunk *cp,
836                             struct spu_queue *qp, bool encrypt)
837 {
838         struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
839         struct cwq_initial_entry *ent;
840         bool in_place;
841         int i;
842
843         ent = spu_queue_alloc(qp, cp->arr_len);
844         if (!ent) {
845                 pr_info("queue_alloc() of %d fails\n",
846                         cp->arr_len);
847                 return -EBUSY;
848         }
849
850         in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
851
852         ent->control = control_word_base(cp->arr[0].src_len,
853                                          0, ctx->enc_type, 0, 0,
854                                          false, true, false, encrypt,
855                                          OPCODE_ENCRYPT |
856                                          (in_place ? OPCODE_INPLACE_BIT : 0));
857         ent->src_addr = cp->arr[0].src_paddr;
858         ent->auth_key_addr = 0UL;
859         ent->auth_iv_addr = 0UL;
860         ent->final_auth_state_addr = 0UL;
861         ent->enc_key_addr = __pa(&ctx->key);
862         ent->enc_iv_addr = cp->iv_paddr;
863         ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
864
865         for (i = 1; i < cp->arr_len; i++) {
866                 ent = spu_queue_next(qp, ent);
867
868                 ent->control = cp->arr[i].src_len - 1;
869                 ent->src_addr = cp->arr[i].src_paddr;
870                 ent->auth_key_addr = 0UL;
871                 ent->auth_iv_addr = 0UL;
872                 ent->final_auth_state_addr = 0UL;
873                 ent->enc_key_addr = 0UL;
874                 ent->enc_iv_addr = 0UL;
875                 ent->dest_addr = 0UL;
876         }
877         ent->control |= CONTROL_END_OF_BLOCK;
878
879         return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
880 }
881
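/* Walk the scatterlists and group the work into n2_crypto_chunk records.
 * A new chunk is started whenever the in-place property changes, the
 * destination becomes discontiguous, the per-chunk descriptor array
 * fills up, or the running total would exceed the 2^16 byte limit.
 */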
882 static int n2_compute_chunks(struct skcipher_request *req)
883 {
884         struct n2_request_context *rctx = skcipher_request_ctx(req);
885         struct skcipher_walk *walk = &rctx->walk;
886         struct n2_crypto_chunk *chunk;
887         unsigned long dest_prev;
888         unsigned int tot_len;
889         bool prev_in_place;
890         int err, nbytes;
891
892         err = skcipher_walk_async(walk, req);
893         if (err)
894                 return err;
895
896         INIT_LIST_HEAD(&rctx->chunk_list);
897
898         chunk = &rctx->chunk;
899         INIT_LIST_HEAD(&chunk->entry);
900
901         chunk->iv_paddr = 0UL;
902         chunk->arr_len = 0;
903         chunk->dest_paddr = 0UL;
904
905         prev_in_place = false;
906         dest_prev = ~0UL;
907         tot_len = 0;
908
909         while ((nbytes = walk->nbytes) != 0) {
910                 unsigned long dest_paddr, src_paddr;
911                 bool in_place;
912                 int this_len;
913
914                 src_paddr = (page_to_phys(walk->src.phys.page) +
915                              walk->src.phys.offset);
916                 dest_paddr = (page_to_phys(walk->dst.phys.page) +
917                               walk->dst.phys.offset);
918                 in_place = (src_paddr == dest_paddr);
919                 this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
920
921                 if (chunk->arr_len != 0) {
922                         if (in_place != prev_in_place ||
923                             (!prev_in_place &&
924                              dest_paddr != dest_prev) ||
925                             chunk->arr_len == N2_CHUNK_ARR_LEN ||
926                             tot_len + this_len > (1 << 16)) {
927                                 chunk->dest_final = dest_prev;
928                                 list_add_tail(&chunk->entry,
929                                               &rctx->chunk_list);
930                                 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
931                                 if (!chunk) {
932                                         err = -ENOMEM;
933                                         break;
934                                 }
935                                 INIT_LIST_HEAD(&chunk->entry);
936                         }
937                 }
938                 if (chunk->arr_len == 0) {
939                         chunk->dest_paddr = dest_paddr;
940                         tot_len = 0;
941                 }
942                 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
943                 chunk->arr[chunk->arr_len].src_len = this_len;
944                 chunk->arr_len++;
945
946                 dest_prev = dest_paddr + this_len;
947                 prev_in_place = in_place;
948                 tot_len += this_len;
949
950                 err = skcipher_walk_done(walk, nbytes - this_len);
951                 if (err)
952                         break;
953         }
954         if (!err && chunk->arr_len != 0) {
955                 chunk->dest_final = dest_prev;
956                 list_add_tail(&chunk->entry, &rctx->chunk_list);
957         }
958
959         return err;
960 }
961
962 static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
963 {
964         struct n2_request_context *rctx = skcipher_request_ctx(req);
965         struct n2_crypto_chunk *c, *tmp;
966
967         if (final_iv)
968                 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
969
970         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
971                 list_del(&c->entry);
972                 if (unlikely(c != &rctx->chunk))
973                         kfree(c);
974         }
975
976 }
977
978 static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
979 {
980         struct n2_request_context *rctx = skcipher_request_ctx(req);
981         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
982         int err = n2_compute_chunks(req);
983         struct n2_crypto_chunk *c, *tmp;
984         unsigned long flags, hv_ret;
985         struct spu_queue *qp;
986
987         if (err)
988                 return err;
989
990         qp = cpu_to_cwq[get_cpu()];
991         err = -ENODEV;
992         if (!qp)
993                 goto out;
994
995         spin_lock_irqsave(&qp->lock, flags);
996
997         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
998                 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
999                 if (err)
1000                         break;
1001                 list_del(&c->entry);
1002                 if (unlikely(c != &rctx->chunk))
1003                         kfree(c);
1004         }
1005         if (!err) {
1006                 hv_ret = wait_for_tail(qp);
1007                 if (hv_ret != HV_EOK)
1008                         err = -EINVAL;
1009         }
1010
1011         spin_unlock_irqrestore(&qp->lock, flags);
1012
1013 out:
1014         put_cpu();
1015
1016         n2_chunk_complete(req, NULL);
1017         return err;
1018 }
1019
1020 static int n2_encrypt_ecb(struct skcipher_request *req)
1021 {
1022         return n2_do_ecb(req, true);
1023 }
1024
1025 static int n2_decrypt_ecb(struct skcipher_request *req)
1026 {
1027         return n2_do_ecb(req, false);
1028 }
1029
1030 static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
1031 {
1032         struct n2_request_context *rctx = skcipher_request_ctx(req);
1033         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1034         unsigned long flags, hv_ret, iv_paddr;
1035         int err = n2_compute_chunks(req);
1036         struct n2_crypto_chunk *c, *tmp;
1037         struct spu_queue *qp;
1038         void *final_iv_addr;
1039
1040         final_iv_addr = NULL;
1041
1042         if (err)
1043                 return err;
1044
1045         qp = cpu_to_cwq[get_cpu()];
1046         err = -ENODEV;
1047         if (!qp)
1048                 goto out;
1049
1050         spin_lock_irqsave(&qp->lock, flags);
1051
1052         if (encrypt) {
1053                 iv_paddr = __pa(rctx->walk.iv);
1054                 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1055                                          entry) {
1056                         c->iv_paddr = iv_paddr;
1057                         err = __n2_crypt_chunk(tfm, c, qp, true);
1058                         if (err)
1059                                 break;
1060                         iv_paddr = c->dest_final - rctx->walk.blocksize;
1061                         list_del(&c->entry);
1062                         if (unlikely(c != &rctx->chunk))
1063                                 kfree(c);
1064                 }
1065                 final_iv_addr = __va(iv_paddr);
1066         } else {
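                /* Decrypt chunks in reverse order: each chunk's IV is the
                 * last ciphertext block of the chunk before it, and an
                 * in-place decrypt processed front to back would overwrite
                 * that block before it could be used.  The final IV of the
                 * whole request is saved into temp_iv first, before it can
                 * be clobbered.
                 */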
1067                 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1068                                                  entry) {
1069                         if (c == &rctx->chunk) {
1070                                 iv_paddr = __pa(rctx->walk.iv);
1071                         } else {
1072                                 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1073                                             tmp->arr[tmp->arr_len-1].src_len -
1074                                             rctx->walk.blocksize);
1075                         }
1076                         if (!final_iv_addr) {
1077                                 unsigned long pa;
1078
1079                                 pa = (c->arr[c->arr_len-1].src_paddr +
1080                                       c->arr[c->arr_len-1].src_len -
1081                                       rctx->walk.blocksize);
1082                                 final_iv_addr = rctx->temp_iv;
1083                                 memcpy(rctx->temp_iv, __va(pa),
1084                                        rctx->walk.blocksize);
1085                         }
1086                         c->iv_paddr = iv_paddr;
1087                         err = __n2_crypt_chunk(tfm, c, qp, false);
1088                         if (err)
1089                                 break;
1090                         list_del(&c->entry);
1091                         if (unlikely(c != &rctx->chunk))
1092                                 kfree(c);
1093                 }
1094         }
1095         if (!err) {
1096                 hv_ret = wait_for_tail(qp);
1097                 if (hv_ret != HV_EOK)
1098                         err = -EINVAL;
1099         }
1100
1101         spin_unlock_irqrestore(&qp->lock, flags);
1102
1103 out:
1104         put_cpu();
1105
1106         n2_chunk_complete(req, err ? NULL : final_iv_addr);
1107         return err;
1108 }
1109
1110 static int n2_encrypt_chaining(struct skcipher_request *req)
1111 {
1112         return n2_do_chaining(req, true);
1113 }
1114
1115 static int n2_decrypt_chaining(struct skcipher_request *req)
1116 {
1117         return n2_do_chaining(req, false);
1118 }
1119
1120 struct n2_skcipher_tmpl {
1121         const char              *name;
1122         const char              *drv_name;
1123         u8                      block_size;
1124         u8                      enc_type;
1125         struct skcipher_alg     skcipher;
1126 };
1127
1128 static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1129         /* ARC4: only ECB is supported (chaining bits ignored) */
1130         {       .name           = "ecb(arc4)",
1131                 .drv_name       = "ecb-arc4",
1132                 .block_size     = 1,
1133                 .enc_type       = (ENC_TYPE_ALG_RC4_STREAM |
1134                                    ENC_TYPE_CHAINING_ECB),
1135                 .skcipher       = {
1136                         .min_keysize    = 1,
1137                         .max_keysize    = 256,
1138                         .setkey         = n2_arc4_setkey,
1139                         .encrypt        = n2_encrypt_ecb,
1140                         .decrypt        = n2_decrypt_ecb,
1141                 },
1142         },
1143
1144         /* DES: ECB, CBC and CFB are supported */
1145         {       .name           = "ecb(des)",
1146                 .drv_name       = "ecb-des",
1147                 .block_size     = DES_BLOCK_SIZE,
1148                 .enc_type       = (ENC_TYPE_ALG_DES |
1149                                    ENC_TYPE_CHAINING_ECB),
1150                 .skcipher       = {
1151                         .min_keysize    = DES_KEY_SIZE,
1152                         .max_keysize    = DES_KEY_SIZE,
1153                         .setkey         = n2_des_setkey,
1154                         .encrypt        = n2_encrypt_ecb,
1155                         .decrypt        = n2_decrypt_ecb,
1156                 },
1157         },
1158         {       .name           = "cbc(des)",
1159                 .drv_name       = "cbc-des",
1160                 .block_size     = DES_BLOCK_SIZE,
1161                 .enc_type       = (ENC_TYPE_ALG_DES |
1162                                    ENC_TYPE_CHAINING_CBC),
1163                 .skcipher       = {
1164                         .ivsize         = DES_BLOCK_SIZE,
1165                         .min_keysize    = DES_KEY_SIZE,
1166                         .max_keysize    = DES_KEY_SIZE,
1167                         .setkey         = n2_des_setkey,
1168                         .encrypt        = n2_encrypt_chaining,
1169                         .decrypt        = n2_decrypt_chaining,
1170                 },
1171         },
1172         {       .name           = "cfb(des)",
1173                 .drv_name       = "cfb-des",
1174                 .block_size     = DES_BLOCK_SIZE,
1175                 .enc_type       = (ENC_TYPE_ALG_DES |
1176                                    ENC_TYPE_CHAINING_CFB),
1177                 .skcipher       = {
1178                         .min_keysize    = DES_KEY_SIZE,
1179                         .max_keysize    = DES_KEY_SIZE,
1180                         .setkey         = n2_des_setkey,
1181                         .encrypt        = n2_encrypt_chaining,
1182                         .decrypt        = n2_decrypt_chaining,
1183                 },
1184         },
1185
1186         /* 3DES: ECB, CBC and CFB are supported */
1187         {       .name           = "ecb(des3_ede)",
1188                 .drv_name       = "ecb-3des",
1189                 .block_size     = DES_BLOCK_SIZE,
1190                 .enc_type       = (ENC_TYPE_ALG_3DES |
1191                                    ENC_TYPE_CHAINING_ECB),
1192                 .skcipher       = {
1193                         .min_keysize    = 3 * DES_KEY_SIZE,
1194                         .max_keysize    = 3 * DES_KEY_SIZE,
1195                         .setkey         = n2_3des_setkey,
1196                         .encrypt        = n2_encrypt_ecb,
1197                         .decrypt        = n2_decrypt_ecb,
1198                 },
1199         },
1200         {       .name           = "cbc(des3_ede)",
1201                 .drv_name       = "cbc-3des",
1202                 .block_size     = DES_BLOCK_SIZE,
1203                 .enc_type       = (ENC_TYPE_ALG_3DES |
1204                                    ENC_TYPE_CHAINING_CBC),
1205                 .skcipher       = {
1206                         .ivsize         = DES_BLOCK_SIZE,
1207                         .min_keysize    = 3 * DES_KEY_SIZE,
1208                         .max_keysize    = 3 * DES_KEY_SIZE,
1209                         .setkey         = n2_3des_setkey,
1210                         .encrypt        = n2_encrypt_chaining,
1211                         .decrypt        = n2_decrypt_chaining,
1212                 },
1213         },
1214         {       .name           = "cfb(des3_ede)",
1215                 .drv_name       = "cfb-3des",
1216                 .block_size     = DES_BLOCK_SIZE,
1217                 .enc_type       = (ENC_TYPE_ALG_3DES |
1218                                    ENC_TYPE_CHAINING_CFB),
1219                 .skcipher       = {
1220                         .min_keysize    = 3 * DES_KEY_SIZE,
1221                         .max_keysize    = 3 * DES_KEY_SIZE,
1222                         .setkey         = n2_3des_setkey,
1223                         .encrypt        = n2_encrypt_chaining,
1224                         .decrypt        = n2_decrypt_chaining,
1225                 },
1226         },
1227         /* AES: ECB, CBC and CTR are supported */
1228         {       .name           = "ecb(aes)",
1229                 .drv_name       = "ecb-aes",
1230                 .block_size     = AES_BLOCK_SIZE,
1231                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1232                                    ENC_TYPE_CHAINING_ECB),
1233                 .skcipher       = {
1234                         .min_keysize    = AES_MIN_KEY_SIZE,
1235                         .max_keysize    = AES_MAX_KEY_SIZE,
1236                         .setkey         = n2_aes_setkey,
1237                         .encrypt        = n2_encrypt_ecb,
1238                         .decrypt        = n2_decrypt_ecb,
1239                 },
1240         },
1241         {       .name           = "cbc(aes)",
1242                 .drv_name       = "cbc-aes",
1243                 .block_size     = AES_BLOCK_SIZE,
1244                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1245                                    ENC_TYPE_CHAINING_CBC),
1246                 .skcipher       = {
1247                         .ivsize         = AES_BLOCK_SIZE,
1248                         .min_keysize    = AES_MIN_KEY_SIZE,
1249                         .max_keysize    = AES_MAX_KEY_SIZE,
1250                         .setkey         = n2_aes_setkey,
1251                         .encrypt        = n2_encrypt_chaining,
1252                         .decrypt        = n2_decrypt_chaining,
1253                 },
1254         },
1255         {       .name           = "ctr(aes)",
1256                 .drv_name       = "ctr-aes",
1257                 .block_size     = AES_BLOCK_SIZE,
1258                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1259                                    ENC_TYPE_CHAINING_COUNTER),
1260                 .skcipher       = {
1261                         .ivsize         = AES_BLOCK_SIZE,
1262                         .min_keysize    = AES_MIN_KEY_SIZE,
1263                         .max_keysize    = AES_MAX_KEY_SIZE,
1264                         .setkey         = n2_aes_setkey,
1265                         .encrypt        = n2_encrypt_chaining,
1266                         .decrypt        = n2_encrypt_chaining,
1267                 },
1268         },
1269
1270 };
1271 #define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1272
1273 static LIST_HEAD(skcipher_algs);
1274
1275 struct n2_hash_tmpl {
1276         const char      *name;
1277         const u8        *hash_zero;
1278         const u32       *hash_init;
1279         u8              hw_op_hashsz;
1280         u8              digest_size;
1281         u8              block_size;
1282         u8              auth_type;
1283         u8              hmac_type;
1284 };
1285
1286 static const u32 n2_md5_init[MD5_HASH_WORDS] = {
1287         cpu_to_le32(MD5_H0),
1288         cpu_to_le32(MD5_H1),
1289         cpu_to_le32(MD5_H2),
1290         cpu_to_le32(MD5_H3),
1291 };
1292 static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1293         SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1294 };
1295 static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1296         SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1297         SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1298 };
1299 static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1300         SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1301         SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1302 };
1303
1304 static const struct n2_hash_tmpl hash_tmpls[] = {
1305         { .name         = "md5",
1306           .hash_zero    = md5_zero_message_hash,
1307           .hash_init    = n2_md5_init,
1308           .auth_type    = AUTH_TYPE_MD5,
1309           .hmac_type    = AUTH_TYPE_HMAC_MD5,
1310           .hw_op_hashsz = MD5_DIGEST_SIZE,
1311           .digest_size  = MD5_DIGEST_SIZE,
1312           .block_size   = MD5_HMAC_BLOCK_SIZE },
1313         { .name         = "sha1",
1314           .hash_zero    = sha1_zero_message_hash,
1315           .hash_init    = n2_sha1_init,
1316           .auth_type    = AUTH_TYPE_SHA1,
1317           .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1318           .hw_op_hashsz = SHA1_DIGEST_SIZE,
1319           .digest_size  = SHA1_DIGEST_SIZE,
1320           .block_size   = SHA1_BLOCK_SIZE },
1321         { .name         = "sha256",
1322           .hash_zero    = sha256_zero_message_hash,
1323           .hash_init    = n2_sha256_init,
1324           .auth_type    = AUTH_TYPE_SHA256,
1325           .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1326           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1327           .digest_size  = SHA256_DIGEST_SIZE,
1328           .block_size   = SHA256_BLOCK_SIZE },
1329         { .name         = "sha224",
1330           .hash_zero    = sha224_zero_message_hash,
1331           .hash_init    = n2_sha224_init,
1332           .auth_type    = AUTH_TYPE_SHA256,
1333           .hmac_type    = AUTH_TYPE_RESERVED,
1334           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1335           .digest_size  = SHA224_DIGEST_SIZE,
1336           .block_size   = SHA224_BLOCK_SIZE },
1337 };
1338 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1339
1340 static LIST_HEAD(ahash_algs);
1341 static LIST_HEAD(hmac_algs);
1342
1343 static int algs_registered;
1344
1345 static void __n2_unregister_algs(void)
1346 {
1347         struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1348         struct n2_ahash_alg *alg, *alg_tmp;
1349         struct n2_hmac_alg *hmac, *hmac_tmp;
1350
1351         list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1352                 crypto_unregister_skcipher(&skcipher->skcipher);
1353                 list_del(&skcipher->entry);
1354                 kfree(skcipher);
1355         }
1356         list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1357                 crypto_unregister_ahash(&hmac->derived.alg);
1358                 list_del(&hmac->derived.entry);
1359                 kfree(hmac);
1360         }
1361         list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1362                 crypto_unregister_ahash(&alg->alg);
1363                 list_del(&alg->entry);
1364                 kfree(alg);
1365         }
1366 }
1367
1368 static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1369 {
1370         crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1371         return 0;
1372 }
1373
1374 static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1375 {
1376         struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1377         struct skcipher_alg *alg;
1378         int err;
1379
1380         if (!p)
1381                 return -ENOMEM;
1382
1383         alg = &p->skcipher;
1384         *alg = tmpl->skcipher;
1385
1386         snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1387         snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1388         alg->base.cra_priority = N2_CRA_PRIORITY;
1389         alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
1390         alg->base.cra_blocksize = tmpl->block_size;
1391         p->enc_type = tmpl->enc_type;
1392         alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1393         alg->base.cra_module = THIS_MODULE;
1394         alg->init = n2_skcipher_init_tfm;
1395
1396         list_add(&p->entry, &skcipher_algs);
1397         err = crypto_register_skcipher(alg);
1398         if (err) {
1399                 pr_err("%s alg registration failed\n", alg->base.cra_name);
1400                 list_del(&p->entry);
1401                 kfree(p);
1402         } else {
1403                 pr_info("%s alg registered\n", alg->base.cra_name);
1404         }
1405         return err;
1406 }
1407
1408 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1409 {
1410         struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1411         struct ahash_alg *ahash;
1412         struct crypto_alg *base;
1413         int err;
1414
1415         if (!p)
1416                 return -ENOMEM;
1417
1418         p->child_alg = n2ahash->alg.halg.base.cra_name;
1419         memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1420         INIT_LIST_HEAD(&p->derived.entry);
1421
1422         ahash = &p->derived.alg;
1423         ahash->digest = n2_hmac_async_digest;
1424         ahash->setkey = n2_hmac_async_setkey;
1425
1426         base = &ahash->halg.base;
1427         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1428         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1429
1430         base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1431         base->cra_init = n2_hmac_cra_init;
1432         base->cra_exit = n2_hmac_cra_exit;
1433
1434         list_add(&p->derived.entry, &hmac_algs);
1435         err = crypto_register_ahash(ahash);
1436         if (err) {
1437                 pr_err("%s alg registration failed\n", base->cra_name);
1438                 list_del(&p->derived.entry);
1439                 kfree(p);
1440         } else {
1441                 pr_info("%s alg registered\n", base->cra_name);
1442         }
1443         return err;
1444 }
1445
1446 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1447 {
1448         struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1449         struct hash_alg_common *halg;
1450         struct crypto_alg *base;
1451         struct ahash_alg *ahash;
1452         int err;
1453
1454         if (!p)
1455                 return -ENOMEM;
1456
1457         p->hash_zero = tmpl->hash_zero;
1458         p->hash_init = tmpl->hash_init;
1459         p->auth_type = tmpl->auth_type;
1460         p->hmac_type = tmpl->hmac_type;
1461         p->hw_op_hashsz = tmpl->hw_op_hashsz;
1462         p->digest_size = tmpl->digest_size;
1463
1464         ahash = &p->alg;
1465         ahash->init = n2_hash_async_init;
1466         ahash->update = n2_hash_async_update;
1467         ahash->final = n2_hash_async_final;
1468         ahash->finup = n2_hash_async_finup;
1469         ahash->digest = n2_hash_async_digest;
1470         ahash->export = n2_hash_async_noexport;
1471         ahash->import = n2_hash_async_noimport;
1472
1473         halg = &ahash->halg;
1474         halg->digestsize = tmpl->digest_size;
1475
1476         base = &halg->base;
1477         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1478         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1479         base->cra_priority = N2_CRA_PRIORITY;
1480         base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1481                           CRYPTO_ALG_NEED_FALLBACK;
1482         base->cra_blocksize = tmpl->block_size;
1483         base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1484         base->cra_module = THIS_MODULE;
1485         base->cra_init = n2_hash_cra_init;
1486         base->cra_exit = n2_hash_cra_exit;
1487
1488         list_add(&p->entry, &ahash_algs);
1489         err = crypto_register_ahash(ahash);
1490         if (err) {
1491                 pr_err("%s alg registration failed\n", base->cra_name);
1492                 list_del(&p->entry);
1493                 kfree(p);
1494         } else {
1495                 pr_info("%s alg registered\n", base->cra_name);
1496         }
1497         if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1498                 err = __n2_register_one_hmac(p);
1499         return err;
1500 }
1501
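/* Algorithm registration is refcounted; only the first caller actually
 * registers with the crypto API, and n2_unregister_algs() tears the
 * algorithms down again when the last reference is dropped.
 */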
1502 static int n2_register_algs(void)
1503 {
1504         int i, err = 0;
1505
1506         mutex_lock(&spu_lock);
1507         if (algs_registered++)
1508                 goto out;
1509
1510         for (i = 0; i < NUM_HASH_TMPLS; i++) {
1511                 err = __n2_register_one_ahash(&hash_tmpls[i]);
1512                 if (err) {
1513                         __n2_unregister_algs();
1514                         goto out;
1515                 }
1516         }
1517         for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1518                 err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1519                 if (err) {
1520                         __n2_unregister_algs();
1521                         goto out;
1522                 }
1523         }
1524
1525 out:
1526         mutex_unlock(&spu_lock);
1527         return err;
1528 }
1529
1530 static void n2_unregister_algs(void)
1531 {
1532         mutex_lock(&spu_lock);
1533         if (!--algs_registered)
1534                 __n2_unregister_algs();
1535         mutex_unlock(&spu_lock);
1536 }
1537
1538 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1539  * a devino.  This isn't very useful to us because all of the
1540  * interrupts listed in the device_node have been translated to
1541  * Linux virtual IRQ cookie numbers.
1542  *
1543  * So we have to back-translate, going through the 'intr' and 'ino'
1544  * property tables of the n2cp MDESC node, matching them with the OF
1545  * 'interrupts' property entries, in order to figure out which
1546  * devino goes to which already-translated IRQ.
1547  */
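/* Illustration with hypothetical values: if the MDESC 'ino' property
 * lists dev_ino 0x25 at index 2, get_irq_props() will have recorded
 * ino_table[2] = { .intr = 3, .ino = 0x25 }.  We then search the OF
 * 'interrupts' property for the value 3; if it is found at position 1,
 * dev->archdata.irqs[1] is the Linux virtual IRQ to use.
 */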
1548 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1549                              unsigned long dev_ino)
1550 {
1551         const unsigned int *dev_intrs;
1552         unsigned int intr;
1553         int i;
1554
1555         for (i = 0; i < ip->num_intrs; i++) {
1556                 if (ip->ino_table[i].ino == dev_ino)
1557                         break;
1558         }
1559         if (i == ip->num_intrs)
1560                 return -ENODEV;
1561
1562         intr = ip->ino_table[i].intr;
1563
1564         dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1565         if (!dev_intrs)
1566                 return -ENODEV;
1567
1568         for (i = 0; i < dev->archdata.num_irqs; i++) {
1569                 if (dev_intrs[i] == intr)
1570                         return i;
1571         }
1572
1573         return -ENODEV;
1574 }
1575
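/* Translate a queue handle to its devino via the hypervisor, map that
 * devino back to a Linux virtual IRQ with find_devino_index(), and
 * request the interrupt for this queue.
 */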
1576 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1577                        const char *irq_name, struct spu_queue *p,
1578                        irq_handler_t handler)
1579 {
1580         unsigned long herr;
1581         int index;
1582
1583         herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1584         if (herr)
1585                 return -EINVAL;
1586
1587         index = find_devino_index(dev, ip, p->devino);
1588         if (index < 0)
1589                 return index;
1590
1591         p->irq = dev->archdata.irqs[index];
1592
1593         snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
1594
1595         return request_irq(p->irq, handler, 0, p->irq_name, p);
1596 }
1597
1598 static struct kmem_cache *queue_cache[2];
1599
1600 static void *new_queue(unsigned long q_type)
1601 {
1602         return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1603 }
1604
1605 static void free_queue(void *p, unsigned long q_type)
1606 {
1607         kmem_cache_free(queue_cache[q_type - 1], p);
1608 }
1609
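/* Each cache object is one complete hardware queue, aligned to the
 * queue entry size.  MAU and CWQ queues differ in entry size and
 * count, hence the two caches.
 */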
1610 static int queue_cache_init(void)
1611 {
1612         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1613                 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1614                         kmem_cache_create("mau_queue",
1615                                           (MAU_NUM_ENTRIES *
1616                                            MAU_ENTRY_SIZE),
1617                                           MAU_ENTRY_SIZE, 0, NULL);
1618         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1619                 return -ENOMEM;
1620
1621         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1622                 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1623                         kmem_cache_create("cwq_queue",
1624                                           (CWQ_NUM_ENTRIES *
1625                                            CWQ_ENTRY_SIZE),
1626                                           CWQ_ENTRY_SIZE, 0, NULL);
1627         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1628                 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1629                 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1630                 return -ENOMEM;
1631         }
1632         return 0;
1633 }
1634
1635 static void queue_cache_destroy(void)
1636 {
1637         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1638         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1639         queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1640         queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1641 }
1642
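/* Queue registration runs the qconf hypercall via work_on_cpu_safe()
 * on a CPU from the queue's sharing mask, i.e. one that is actually
 * attached to this execution unit.
 */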
1643 static long spu_queue_register_workfn(void *arg)
1644 {
1645         struct spu_qreg *qr = arg;
1646         struct spu_queue *p = qr->queue;
1647         unsigned long q_type = qr->type;
1648         unsigned long hv_ret;
1649
1650         hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1651                                  CWQ_NUM_ENTRIES, &p->qhandle);
1652         if (!hv_ret)
1653                 sun4v_ncs_sethead_marker(p->qhandle, 0);
1654
1655         return hv_ret ? -EINVAL : 0;
1656 }
1657
1658 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1659 {
1660         int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1661         struct spu_qreg qr = { .queue = p, .type = q_type };
1662
1663         return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1664 }
1665
1666 static int spu_queue_setup(struct spu_queue *p)
1667 {
1668         int err;
1669
1670         p->q = new_queue(p->q_type);
1671         if (!p->q)
1672                 return -ENOMEM;
1673
1674         err = spu_queue_register(p, p->q_type);
1675         if (err) {
1676                 free_queue(p->q, p->q_type);
1677                 p->q = NULL;
1678         }
1679
1680         return err;
1681 }
1682
1683 static void spu_queue_destroy(struct spu_queue *p)
1684 {
1685         unsigned long hv_ret;
1686
1687         if (!p->q)
1688                 return;
1689
1690         hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1691
1692         if (!hv_ret)
1693                 free_queue(p->q, p->q_type);
1694 }
1695
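/* Tear down every queue on a list: clear any cpu_to_cwq references,
 * free the IRQ, unconfigure the queue with the hypervisor and release
 * its memory.
 */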
1696 static void spu_list_destroy(struct list_head *list)
1697 {
1698         struct spu_queue *p, *n;
1699
1700         list_for_each_entry_safe(p, n, list, list) {
1701                 int i;
1702
1703                 for (i = 0; i < NR_CPUS; i++) {
1704                         if (cpu_to_cwq[i] == p)
1705                                 cpu_to_cwq[i] = NULL;
1706                 }
1707
1708                 if (p->irq) {
1709                         free_irq(p->irq, p);
1710                         p->irq = 0;
1711                 }
1712                 spu_queue_destroy(p);
1713                 list_del(&p->list);
1714                 kfree(p);
1715         }
1716 }
1717
1718 /* Walk the backward arcs of a CWQ 'exec-unit' node,
1719  * gathering cpu membership information.
1720  */
1721 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1722                                struct platform_device *dev,
1723                                u64 node, struct spu_queue *p,
1724                                struct spu_queue **table)
1725 {
1726         u64 arc;
1727
1728         mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1729                 u64 tgt = mdesc_arc_target(mdesc, arc);
1730                 const char *name = mdesc_node_name(mdesc, tgt);
1731                 const u64 *id;
1732
1733                 if (strcmp(name, "cpu"))
1734                         continue;
1735                 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1736                 if (table[*id] != NULL) {
1737                         dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1738                                 dev->dev.of_node);
1739                         return -EINVAL;
1740                 }
1741                 cpumask_set_cpu(*id, &p->sharing);
1742                 table[*id] = p;
1743         }
1744         return 0;
1745 }
1746
1747 /* Process an 'exec-unit' MDESC node of type 'cwq'.  */
1748 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1749                             struct platform_device *dev, struct mdesc_handle *mdesc,
1750                             u64 node, const char *iname, unsigned long q_type,
1751                             irq_handler_t handler, struct spu_queue **table)
1752 {
1753         struct spu_queue *p;
1754         int err;
1755
1756         p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1757         if (!p) {
1758                 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1759                         dev->dev.of_node);
1760                 return -ENOMEM;
1761         }
1762
1763         cpumask_clear(&p->sharing);
1764         spin_lock_init(&p->lock);
1765         p->q_type = q_type;
1766         INIT_LIST_HEAD(&p->jobs);
1767         list_add(&p->list, list);
1768
1769         err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1770         if (err)
1771                 return err;
1772
1773         err = spu_queue_setup(p);
1774         if (err)
1775                 return err;
1776
1777         return spu_map_ino(dev, ip, iname, p, handler);
1778 }
1779
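/* Scan the MDESC for 'exec-unit' nodes of the requested type and set
 * up a queue (and its IRQ) for each one found.
 */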
1780 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1781                           struct spu_mdesc_info *ip, struct list_head *list,
1782                           const char *exec_name, unsigned long q_type,
1783                           irq_handler_t handler, struct spu_queue **table)
1784 {
1785         int err = 0;
1786         u64 node;
1787
1788         mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1789                 const char *type;
1790
1791                 type = mdesc_get_property(mdesc, node, "type", NULL);
1792                 if (!type || strcmp(type, exec_name))
1793                         continue;
1794
1795                 err = handle_exec_unit(ip, list, dev, mdesc, node,
1796                                        exec_name, q_type, handler, table);
1797                 if (err) {
1798                         spu_list_destroy(list);
1799                         break;
1800                 }
1801         }
1802
1803         return err;
1804 }
1805
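/* Build the devino table for a virtual-device MDESC node from its
 * 'ino' property.  Each entry carries a 1-based interrupt number that
 * find_devino_index() matches against the OF 'interrupts' entries.
 */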
1806 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1807                          struct spu_mdesc_info *ip)
1808 {
1809         const u64 *ino;
1810         int ino_len;
1811         int i;
1812
1813         ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1814         if (!ino) {
1815                 pr_err("NO 'ino'\n");
1816                 return -ENODEV;
1817         }
1818
1819         ip->num_intrs = ino_len / sizeof(u64);
1820         ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1821                                  ip->num_intrs),
1822                                 GFP_KERNEL);
1823         if (!ip->ino_table)
1824                 return -ENOMEM;
1825
1826         for (i = 0; i < ip->num_intrs; i++) {
1827                 struct ino_blob *b = &ip->ino_table[i];
1828                 b->intr = i + 1;
1829                 b->ino = ino[i];
1830         }
1831
1832         return 0;
1833 }
1834
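/* Find the 'virtual-device' MDESC node whose name matches and whose
 * cfg-handle equals this platform device's 'reg' property, then pull
 * its interrupt information.
 */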
1835 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1836                                 struct platform_device *dev,
1837                                 struct spu_mdesc_info *ip,
1838                                 const char *node_name)
1839 {
1840         const unsigned int *reg;
1841         u64 node;
1842
1843         reg = of_get_property(dev->dev.of_node, "reg", NULL);
1844         if (!reg)
1845                 return -ENODEV;
1846
1847         mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1848                 const char *name;
1849                 const u64 *chdl;
1850
1851                 name = mdesc_get_property(mdesc, node, "name", NULL);
1852                 if (!name || strcmp(name, node_name))
1853                         continue;
1854                 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1855                 if (!chdl || (*chdl != *reg))
1856                         continue;
1857                 ip->cfg_handle = *chdl;
1858                 return get_irq_props(mdesc, node, ip);
1859         }
1860
1861         return -ENODEV;
1862 }
1863
1864 static unsigned long n2_spu_hvapi_major;
1865 static unsigned long n2_spu_hvapi_minor;
1866
1867 static int n2_spu_hvapi_register(void)
1868 {
1869         int err;
1870
1871         n2_spu_hvapi_major = 2;
1872         n2_spu_hvapi_minor = 0;
1873
1874         err = sun4v_hvapi_register(HV_GRP_NCS,
1875                                    n2_spu_hvapi_major,
1876                                    &n2_spu_hvapi_minor);
1877
1878         if (!err)
1879                 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1880                         n2_spu_hvapi_major,
1881                         n2_spu_hvapi_minor);
1882
1883         return err;
1884 }
1885
1886 static void n2_spu_hvapi_unregister(void)
1887 {
1888         sun4v_hvapi_unregister(HV_GRP_NCS);
1889 }
1890
1891 static int global_ref;
1892
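/* Resources shared by all n2cp/ncp instances: the NCS hypervisor API
 * registration, the queue kmem caches and the per-cpu queue tables.
 * They are refcounted so the two drivers can probe and remove in any
 * order.
 */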
1893 static int grab_global_resources(void)
1894 {
1895         int err = 0;
1896
1897         mutex_lock(&spu_lock);
1898
1899         if (global_ref++)
1900                 goto out;
1901
1902         err = n2_spu_hvapi_register();
1903         if (err)
1904                 goto out;
1905
1906         err = queue_cache_init();
1907         if (err)
1908                 goto out_hvapi_release;
1909
1910         err = -ENOMEM;
1911         cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1912                              GFP_KERNEL);
1913         if (!cpu_to_cwq)
1914                 goto out_queue_cache_destroy;
1915
1916         cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1917                              GFP_KERNEL);
1918         if (!cpu_to_mau)
1919                 goto out_free_cwq_table;
1920
1921         err = 0;
1922
1923 out:
1924         if (err)
1925                 global_ref--;
1926         mutex_unlock(&spu_lock);
1927         return err;
1928
1929 out_free_cwq_table:
1930         kfree(cpu_to_cwq);
1931         cpu_to_cwq = NULL;
1932
1933 out_queue_cache_destroy:
1934         queue_cache_destroy();
1935
1936 out_hvapi_release:
1937         n2_spu_hvapi_unregister();
1938         goto out;
1939 }
1940
1941 static void release_global_resources(void)
1942 {
1943         mutex_lock(&spu_lock);
1944         if (!--global_ref) {
1945                 kfree(cpu_to_cwq);
1946                 cpu_to_cwq = NULL;
1947
1948                 kfree(cpu_to_mau);
1949                 cpu_to_mau = NULL;
1950
1951                 queue_cache_destroy();
1952                 n2_spu_hvapi_unregister();
1953         }
1954         mutex_unlock(&spu_lock);
1955 }
1956
1957 static struct n2_crypto *alloc_n2cp(void)
1958 {
1959         struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1960
1961         if (np)
1962                 INIT_LIST_HEAD(&np->cwq_list);
1963
1964         return np;
1965 }
1966
1967 static void free_n2cp(struct n2_crypto *np)
1968 {
1969         kfree(np->cwq_info.ino_table);
1970         np->cwq_info.ino_table = NULL;
1971
1972         kfree(np);
1973 }
1974
1975 static void n2_spu_driver_version(void)
1976 {
1977         static int n2_spu_version_printed;
1978
1979         if (n2_spu_version_printed++ == 0)
1980                 pr_info("%s", version);
1981 }
1982
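/* Probe one n2cp device: grab the shared resources and the MDESC,
 * build the CWQ queue list from the 'cwq' exec-unit nodes, then
 * register the crypto algorithms.
 */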
1983 static int n2_crypto_probe(struct platform_device *dev)
1984 {
1985         struct mdesc_handle *mdesc;
1986         struct n2_crypto *np;
1987         int err;
1988
1989         n2_spu_driver_version();
1990
1991         pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1992
1993         np = alloc_n2cp();
1994         if (!np) {
1995                 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1996                         dev->dev.of_node);
1997                 return -ENOMEM;
1998         }
1999
2000         err = grab_global_resources();
2001         if (err) {
2002                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2003                         dev->dev.of_node);
2004                 goto out_free_n2cp;
2005         }
2006
2007         mdesc = mdesc_grab();
2008
2009         if (!mdesc) {
2010                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2011                         dev->dev.of_node);
2012                 err = -ENODEV;
2013                 goto out_free_global;
2014         }
2015         err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2016         if (err) {
2017                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2018                         dev->dev.of_node);
2019                 mdesc_release(mdesc);
2020                 goto out_free_global;
2021         }
2022
2023         err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2024                              "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2025                              cpu_to_cwq);
2026         mdesc_release(mdesc);
2027
2028         if (err) {
2029                 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
2030                         dev->dev.of_node);
2031                 goto out_free_global;
2032         }
2033
2034         err = n2_register_algs();
2035         if (err) {
2036                 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
2037                         dev->dev.of_node);
2038                 goto out_free_spu_list;
2039         }
2040
2041         dev_set_drvdata(&dev->dev, np);
2042
2043         return 0;
2044
2045 out_free_spu_list:
2046         spu_list_destroy(&np->cwq_list);
2047
2048 out_free_global:
2049         release_global_resources();
2050
2051 out_free_n2cp:
2052         free_n2cp(np);
2053
2054         return err;
2055 }
2056
2057 static int n2_crypto_remove(struct platform_device *dev)
2058 {
2059         struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2060
2061         n2_unregister_algs();
2062
2063         spu_list_destroy(&np->cwq_list);
2064
2065         release_global_resources();
2066
2067         free_n2cp(np);
2068
2069         return 0;
2070 }
2071
2072 static struct n2_mau *alloc_ncp(void)
2073 {
2074         struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2075
2076         if (mp)
2077                 INIT_LIST_HEAD(&mp->mau_list);
2078
2079         return mp;
2080 }
2081
2082 static void free_ncp(struct n2_mau *mp)
2083 {
2084         kfree(mp->mau_info.ino_table);
2085         mp->mau_info.ino_table = NULL;
2086
2087         kfree(mp);
2088 }
2089
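/* Probe one ncp (MAU) device.  This mirrors n2_crypto_probe() but
 * scans 'mau' exec-unit nodes and registers no algorithms of its own.
 */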
2090 static int n2_mau_probe(struct platform_device *dev)
2091 {
2092         struct mdesc_handle *mdesc;
2093         struct n2_mau *mp;
2094         int err;
2095
2096         n2_spu_driver_version();
2097
2098         pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2099
2100         mp = alloc_ncp();
2101         if (!mp) {
2102                 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2103                         dev->dev.of_node);
2104                 return -ENOMEM;
2105         }
2106
2107         err = grab_global_resources();
2108         if (err) {
2109                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2110                         dev->dev.of_node);
2111                 goto out_free_ncp;
2112         }
2113
2114         mdesc = mdesc_grab();
2115
2116         if (!mdesc) {
2117                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2118                         dev->dev.of_node);
2119                 err = -ENODEV;
2120                 goto out_free_global;
2121         }
2122
2123         err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2124         if (err) {
2125                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2126                         dev->dev.of_node);
2127                 mdesc_release(mdesc);
2128                 goto out_free_global;
2129         }
2130
2131         err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2132                              "mau", HV_NCS_QTYPE_MAU, mau_intr,
2133                              cpu_to_mau);
2134         mdesc_release(mdesc);
2135
2136         if (err) {
2137                 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2138                         dev->dev.of_node);
2139                 goto out_free_global;
2140         }
2141
2142         dev_set_drvdata(&dev->dev, mp);
2143
2144         return 0;
2145
2146 out_free_global:
2147         release_global_resources();
2148
2149 out_free_ncp:
2150         free_ncp(mp);
2151
2152         return err;
2153 }
2154
2155 static int n2_mau_remove(struct platform_device *dev)
2156 {
2157         struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2158
2159         spu_list_destroy(&mp->mau_list);
2160
2161         release_global_resources();
2162
2163         free_ncp(mp);
2164
2165         return 0;
2166 }
2167
2168 static const struct of_device_id n2_crypto_match[] = {
2169         {
2170                 .name = "n2cp",
2171                 .compatible = "SUNW,n2-cwq",
2172         },
2173         {
2174                 .name = "n2cp",
2175                 .compatible = "SUNW,vf-cwq",
2176         },
2177         {
2178                 .name = "n2cp",
2179                 .compatible = "SUNW,kt-cwq",
2180         },
2181         {},
2182 };
2183
2184 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2185
2186 static struct platform_driver n2_crypto_driver = {
2187         .driver = {
2188                 .name           =       "n2cp",
2189                 .of_match_table =       n2_crypto_match,
2190         },
2191         .probe          =       n2_crypto_probe,
2192         .remove         =       n2_crypto_remove,
2193 };
2194
2195 static const struct of_device_id n2_mau_match[] = {
2196         {
2197                 .name = "ncp",
2198                 .compatible = "SUNW,n2-mau",
2199         },
2200         {
2201                 .name = "ncp",
2202                 .compatible = "SUNW,vf-mau",
2203         },
2204         {
2205                 .name = "ncp",
2206                 .compatible = "SUNW,kt-mau",
2207         },
2208         {},
2209 };
2210
2211 MODULE_DEVICE_TABLE(of, n2_mau_match);
2212
2213 static struct platform_driver n2_mau_driver = {
2214         .driver = {
2215                 .name           =       "ncp",
2216                 .of_match_table =       n2_mau_match,
2217         },
2218         .probe          =       n2_mau_probe,
2219         .remove         =       n2_mau_remove,
2220 };
2221
2222 static struct platform_driver * const drivers[] = {
2223         &n2_crypto_driver,
2224         &n2_mau_driver,
2225 };
2226
2227 static int __init n2_init(void)
2228 {
2229         return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2230 }
2231
2232 static void __exit n2_exit(void)
2233 {
2234         platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2235 }
2236
2237 module_init(n2_init);
2238 module_exit(n2_exit);