/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *              Patrick Steuer <patrick.steuer@de.ibm.com>
 *              Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

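/*
 * A single preallocated page used by the CTR path to build a run of
 * consecutive counter blocks; ctrblk_lock serializes access so that
 * concurrent tfms do not overwrite each other's counter material.
 */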
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

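/*
 * Masks of the CPACF function codes available on this machine for the
 * KM, KMC, KMCTR and KMA instructions, filled in once at module init
 * via cpacf_query().
 */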
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
                    kma_functions;

struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        int key_len;
        unsigned long fc;
        union {
                struct crypto_skcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};

struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        int key_len;
        unsigned long fc;
        struct crypto_skcipher *fallback;
};

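/*
 * State for walking a scatterlist during GCM processing. The CPACF KMA
 * instruction needs virtually contiguous input, so chunks smaller than
 * one AES block are collected in buf until enough data is available.
 */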
struct gcm_sg_walk {
        struct scatter_walk walk;
        unsigned int walk_bytes;
        u8 *walk_ptr;
        unsigned int walk_bytes_remain;
        u8 buf[AES_BLOCK_SIZE];
        unsigned int buf_bytes;
        u8 *ptr;
        unsigned int nbytes;
};

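/*
 * Key lengths or function codes not covered by CPACF are handled by a
 * software fallback tfm, allocated in the cra_init callbacks below.
 * The setkey helpers forward the request flags to the fallback and
 * mirror its result flags back to the caller's tfm.
 */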
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                        CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_cip(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc | CPACF_DECRYPT,
                 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
        .cra_name               =       "aes",
        .cra_driver_name        =       "aes-s390",
        .cra_priority           =       300,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_cip,
        .cra_exit               =       fallback_exit_cip,
        .cra_u                  =       {
                .cipher = {
                        .cia_min_keysize        =       AES_MIN_KEY_SIZE,
                        .cia_max_keysize        =       AES_MAX_KEY_SIZE,
                        .cia_setkey             =       aes_set_key,
                        .cia_encrypt            =       aes_encrypt,
                        .cia_decrypt            =       aes_decrypt,
                }
        }
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

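/*
 * The blkcipher walk hands back virtually mapped chunks of src/dst.
 * Only complete AES blocks are fed to the KM instruction; any tail
 * below the block size is returned to the walker for the next round.
 */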
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;

        ret = blkcipher_walk_virt(desc, walk);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(sctx->fc | modifier, sctx->key,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }

        return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
                                                   CRYPTO_ALG_ASYNC |
                                                   CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name               =       "ecb(aes)",
        .cra_driver_name        =       "ecb-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ecb */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .setkey                 =       ecb_aes_set_key,
                        .encrypt                =       ecb_aes_encrypt,
                        .decrypt                =       ecb_aes_decrypt,
                }
        }
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMC_AES_128 :
             (key_len == 24) ? CPACF_KMC_AES_192 :
             (key_len == 32) ? CPACF_KMC_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

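/*
 * KMC takes its chaining value as part of the parameter block, so the
 * IV and key are copied in front of the call and the updated chaining
 * value is copied back to walk->iv afterwards.
 */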
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        ret = blkcipher_walk_virt(desc, walk);
        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_kmc(sctx->fc | modifier, &param,
                          walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name               =       "cbc(aes)",
        .cra_driver_name        =       "cbc-aes-s390",
        .cra_priority           =       400,    /* combo: aes + cbc */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       cbc_aes_set_key,
                        .encrypt                =       cbc_aes_encrypt,
                        .decrypt                =       cbc_aes_decrypt,
                }
        }
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
                                                     CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned long fc;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        /* In fips mode only 128 bit or 256 bit keys are valid */
        if (fips_enabled && key_len != 32 && key_len != 64) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* Pick the correct function code based on the key length */
        fc = (key_len == 32) ? CPACF_KM_XTS_128 :
             (key_len == 64) ? CPACF_KM_XTS_256 : 0;

        /* Check if the function code is available */
        xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!xts_ctx->fc)
                return xts_fallback_setkey(tfm, in_key, key_len);

        /* Split the XTS key into the two subkeys */
        key_len = key_len / 2;
        xts_ctx->key_len = key_len;
        memcpy(xts_ctx->key, in_key, key_len);
        memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
        return 0;
}

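/*
 * For XTS the PCC instruction precomputes the tweak from the second
 * subkey and the IV before KM processes the data with the first
 * subkey. "offset" selects where a shorter key sits inside the 256-bit
 * key field of the parameter blocks: key_len & 0x10 yields 16 for
 * AES-128 subkeys (rightmost 16 bytes of the field) and 0 for AES-256.
 */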
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int offset, nbytes, n;
        int ret;
        struct {
                u8 key[32];
                u8 tweak[16];
                u8 block[16];
                u8 bit[16];
                u8 xts[16];
        } pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        ret = blkcipher_walk_virt(desc, walk);
        offset = xts_ctx->key_len & 0x10;
        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
        cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

        memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
        memcpy(xts_param.init, pcc_param.xts, 16);

        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
                                                  CRYPTO_ALG_ASYNC |
                                                  CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
        .cra_name               =       "xts(aes)",
        .cra_driver_name        =       "xts-aes-s390",
        .cra_priority           =       400,    /* combo: aes + xts */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       AES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct s390_xts_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       xts_fallback_init,
        .cra_exit               =       xts_fallback_exit,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       2 * AES_MIN_KEY_SIZE,
                        .max_keysize            =       2 * AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       xts_aes_set_key,
                        .encrypt                =       xts_aes_encrypt,
                        .decrypt                =       xts_aes_decrypt,
                }
        }
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
             (key_len == 24) ? CPACF_KMCTR_AES_192 :
             (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

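/*
 * Fill ctrptr with consecutive counter blocks (the IV, then IV+1 and
 * so on) and return the number of counter bytes prepared.
 */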
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        memcpy(ctrptr, iv, AES_BLOCK_SIZE);
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
                memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
                crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
                ctrptr += AES_BLOCK_SIZE;
        }
        return n;
}

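/*
 * ctrblk is shared between tfms; if the trylock fails, processing
 * falls back to one counter block at a time using walk->iv directly
 * instead of the precomputed counter run.
 */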
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        u8 buf[AES_BLOCK_SIZE], *ctrptr;
        unsigned int n, nbytes;
        int ret, locked;

        locked = spin_trylock(&ctrblk_lock);

        ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2*AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk->iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            walk->dst.virt.addr, walk->src.virt.addr,
                            n, ctrptr);
                if (ctrptr == ctrblk)
                        memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
                               AES_BLOCK_SIZE);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        if (locked)
                spin_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            buf, walk->src.virt.addr,
                            AES_BLOCK_SIZE, walk->iv);
                memcpy(walk->dst.virt.addr, buf, nbytes);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name               =       "ctr(aes)",
        .cra_driver_name        =       "ctr-aes-s390",
        .cra_priority           =       400,    /* combo: aes + ctr */
        .cra_flags              =       CRYPTO_ALG_TYPE_BLKCIPHER |
                                        CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          =       1,
        .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
        .cra_type               =       &crypto_blkcipher_type,
        .cra_module             =       THIS_MODULE,
        .cra_init               =       fallback_init_blk,
        .cra_exit               =       fallback_exit_blk,
        .cra_u                  =       {
                .blkcipher = {
                        .min_keysize            =       AES_MIN_KEY_SIZE,
                        .max_keysize            =       AES_MAX_KEY_SIZE,
                        .ivsize                 =       AES_BLOCK_SIZE,
                        .setkey                 =       ctr_aes_set_key,
                        .encrypt                =       ctr_aes_encrypt,
                        .decrypt                =       ctr_aes_decrypt,
                }
        }
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
                          unsigned int keylen)
{
        struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->fc = CPACF_KMA_GCM_AES_128;
                break;
        case AES_KEYSIZE_192:
                ctx->fc = CPACF_KMA_GCM_AES_192;
                break;
        case AES_KEYSIZE_256:
                ctx->fc = CPACF_KMA_GCM_AES_256;
                break;
        default:
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->key_len = keylen;
        return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
                              unsigned int len)
{
        memset(gw, 0, sizeof(*gw));
        gw->walk_bytes_remain = len;
        scatterwalk_start(&gw->walk, sg);
}

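/*
 * Advance the walk and return a pointer to at least "minbytesneeded"
 * contiguous bytes. If the current scatterlist entry is too short,
 * data is gathered into gw->buf across entries until the minimum is
 * met or the walk is exhausted.
 */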
static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
        int n;

        /* minbytesneeded <= AES_BLOCK_SIZE */
        if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
                gw->ptr = gw->buf;
                gw->nbytes = gw->buf_bytes;
                goto out;
        }

        if (gw->walk_bytes_remain == 0) {
                gw->ptr = NULL;
                gw->nbytes = 0;
                goto out;
        }

        gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
        if (!gw->walk_bytes) {
                scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
                gw->walk_bytes = scatterwalk_clamp(&gw->walk,
                                                   gw->walk_bytes_remain);
        }
        gw->walk_ptr = scatterwalk_map(&gw->walk);

        if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
                gw->ptr = gw->walk_ptr;
                gw->nbytes = gw->walk_bytes;
                goto out;
        }

        while (1) {
                n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
                memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
                gw->buf_bytes += n;
                gw->walk_bytes_remain -= n;
                scatterwalk_unmap(&gw->walk);
                scatterwalk_advance(&gw->walk, n);
                scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);

                if (gw->buf_bytes >= minbytesneeded) {
                        gw->ptr = gw->buf;
                        gw->nbytes = gw->buf_bytes;
                        goto out;
                }

                gw->walk_bytes = scatterwalk_clamp(&gw->walk,
                                                   gw->walk_bytes_remain);
                if (!gw->walk_bytes) {
                        scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
                        gw->walk_bytes = scatterwalk_clamp(&gw->walk,
                                                        gw->walk_bytes_remain);
                }
                gw->walk_ptr = scatterwalk_map(&gw->walk);
        }

out:
        return gw->nbytes;
}

static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
        int n;

        if (gw->ptr == NULL)
                return;

        if (gw->ptr == gw->buf) {
                n = gw->buf_bytes - bytesdone;
                if (n > 0) {
                        /* keep the unprocessed remainder at buf's front */
                        memmove(gw->buf, gw->buf + bytesdone, n);
                        gw->buf_bytes = n;
                } else
                        gw->buf_bytes = 0;
        } else {
                gw->walk_bytes_remain -= bytesdone;
                scatterwalk_unmap(&gw->walk);
                scatterwalk_advance(&gw->walk, bytesdone);
                scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
        }
}

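/*
 * KMA consumes the AAD (in block multiples) before the plain-/cipher-
 * text. The loop below sizes each chunk accordingly and raises the
 * CPACF_KMA_LAAD resp. CPACF_KMA_LPC flag once the last piece of AAD
 * resp. text is passed, so the instruction can finalize the lengths.
 */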
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
        unsigned int ivsize = crypto_aead_ivsize(tfm);
        unsigned int taglen = crypto_aead_authsize(tfm);
        unsigned int aadlen = req->assoclen;
        unsigned int pclen = req->cryptlen;
        int ret = 0;

        unsigned int len, in_bytes, out_bytes,
                     min_bytes, bytes, aad_bytes, pc_bytes;
        struct gcm_sg_walk gw_in, gw_out;
        u8 tag[GHASH_DIGEST_SIZE];

        struct {
                u32 _[3];               /* reserved */
                u32 cv;                 /* Counter Value */
                u8 t[GHASH_DIGEST_SIZE];/* Tag */
                u8 h[AES_BLOCK_SIZE];   /* Hash-subkey */
                u64 taadl;              /* Total AAD Length */
                u64 tpcl;               /* Total Plain-/Cipher-text Length */
                u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
                u8 k[AES_MAX_KEY_SIZE]; /* Key */
        } param;

        /*
         * encrypt
         *   req->src: aad||plaintext
         *   req->dst: aad||ciphertext||tag
         * decrypt
         *   req->src: aad||ciphertext||tag
         *   req->dst: aad||plaintext, return 0 or -EBADMSG
         * aad, plaintext and ciphertext may be empty.
         */
        if (flags & CPACF_DECRYPT)
                pclen -= taglen;
        len = aadlen + pclen;

        memset(&param, 0, sizeof(param));
        param.cv = 1;
        param.taadl = aadlen * 8;
        param.tpcl = pclen * 8;
        memcpy(param.j0, req->iv, ivsize);
        *(u32 *)(param.j0 + ivsize) = 1;
        memcpy(param.k, ctx->key, ctx->key_len);

        gcm_sg_walk_start(&gw_in, req->src, len);
        gcm_sg_walk_start(&gw_out, req->dst, len);

        do {
                min_bytes = min_t(unsigned int,
                                  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
                in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
                out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
                bytes = min(in_bytes, out_bytes);

                if (aadlen + pclen <= bytes) {
                        aad_bytes = aadlen;
                        pc_bytes = pclen;
                        flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
                } else {
                        if (aadlen <= bytes) {
                                aad_bytes = aadlen;
                                pc_bytes = (bytes - aadlen) &
                                           ~(AES_BLOCK_SIZE - 1);
                                flags |= CPACF_KMA_LAAD;
                        } else {
                                aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
                                pc_bytes = 0;
                        }
                }

                if (aad_bytes > 0)
                        memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

                cpacf_kma(ctx->fc | flags, &param,
                          gw_out.ptr + aad_bytes,
                          gw_in.ptr + aad_bytes, pc_bytes,
                          gw_in.ptr, aad_bytes);

                gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
                gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
                aadlen -= aad_bytes;
                pclen -= pc_bytes;
        } while (aadlen + pclen > 0);

        if (flags & CPACF_DECRYPT) {
                scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
                if (crypto_memneq(tag, param.t, taglen))
                        ret = -EBADMSG;
        } else
                scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

        memzero_explicit(&param, sizeof(param));
        return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
        return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
        return gcm_aes_crypt(req, CPACF_DECRYPT);
}

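/*
 * ivsize is the standard 96-bit GCM nonce; the remaining 32 bits of
 * the initial counter block (j0) are set to 1 in gcm_aes_crypt().
 */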
static struct aead_alg gcm_aes_aead = {
        .setkey                 = gcm_aes_setkey,
        .setauthsize            = gcm_aes_setauthsize,
        .encrypt                = gcm_aes_encrypt,
        .decrypt                = gcm_aes_decrypt,

        .ivsize                 = GHASH_BLOCK_SIZE - sizeof(u32),
        .maxauthsize            = GHASH_DIGEST_SIZE,
        .chunksize              = AES_BLOCK_SIZE,

        .base                   = {
                .cra_flags              = CRYPTO_ALG_TYPE_AEAD,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct s390_aes_ctx),
                .cra_priority           = 900,
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "gcm-aes-s390",
                .cra_module             = THIS_MODULE,
        },
};

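/*
 * Registry of successfully registered crypto_algs so that init failure
 * and module exit can unwind them; at most five entries are needed
 * (aes, ecb, cbc, xts and ctr).
 */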
static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
        int ret;

        ret = crypto_register_alg(alg);
        if (!ret)
                aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
        return ret;
}

static void aes_s390_fini(void)
{
        while (aes_s390_algs_num--)
                crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
        if (ctrblk)
                free_page((unsigned long) ctrblk);

        crypto_unregister_aead(&gcm_aes_aead);
}

static int __init aes_s390_init(void)
{
        int ret;

        /* Query available functions for KM, KMC, KMCTR and KMA */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);
        cpacf_query(CPACF_KMA, &kma_functions);

        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
                ret = aes_s390_register_alg(&aes_alg);
                if (ret)
                        goto out_err;
                ret = aes_s390_register_alg(&ecb_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
                ret = aes_s390_register_alg(&cbc_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
                ret = aes_s390_register_alg(&xts_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = aes_s390_register_alg(&ctr_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
            cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
            cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
                ret = crypto_register_aead(&gcm_aes_aead);
                if (ret)
                        goto out_err;
        }

        return 0;
out_err:
        aes_s390_fini();
        return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");