crypto: arm64/ghash - switch to AES library
author     Ard Biesheuvel <ard.biesheuvel@linaro.org>
           Tue, 2 Jul 2019 19:41:28 +0000 (21:41 +0200)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Fri, 26 Jul 2019 04:56:04 +0000 (14:56 +1000)
The GHASH code uses the generic AES key expansion routines and, on its
fallback path, calls directly into the scalar table-based AES cipher for
arm64. Since that implementation is known to be non-time-invariant,
calling it from an otherwise time-invariant SIMD cipher is a bit nasty.

So let's switch to the AES library - this makes the code more robust,
and drops the dependency on the generic AES cipher, allowing us to
omit it entirely in the future.
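
For reference (not part of the patch itself), a minimal sketch of the
library API this commit moves to: aes_expandkey() and aes_encrypt() from
<crypto/aes.h>, provided by CRYPTO_LIB_AES. The helper name below is
illustrative only; it mirrors how gcm_setkey() derives the GHASH subkey
H after this change.

	#include <crypto/aes.h>

	/*
	 * Illustrative sketch: expand the user key with the AES library and
	 * encrypt an all-zero block to obtain the GHASH subkey H, instead of
	 * calling __aes_arm64_encrypt() on the raw round keys.
	 */
	static int example_derive_ghash_key(const u8 *inkey, unsigned int keylen,
					    u8 h[AES_BLOCK_SIZE])
	{
		struct crypto_aes_ctx aes_key;
		int ret;

		ret = aes_expandkey(&aes_key, inkey, keylen);
		if (ret)
			return -EINVAL;

		/* H = AES_K(0^128) */
		aes_encrypt(&aes_key, h, (u8[AES_BLOCK_SIZE]){});

		return 0;
	}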

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/arm64/crypto/Kconfig
arch/arm64/crypto/ghash-ce-glue.c

index d9a523ecdd836d54e5742135b6ff9e2fb4c7d520..1762055e70936ac539e210bd59152376e32973ca 100644 (file)
@@ -58,8 +58,7 @@ config CRYPTO_GHASH_ARM64_CE
        depends on KERNEL_MODE_NEON
        select CRYPTO_HASH
        select CRYPTO_GF128MUL
-       select CRYPTO_AES
-       select CRYPTO_AES_ARM64
+       select CRYPTO_LIB_AES
 
 config CRYPTO_CRCT10DIF_ARM64_CE
        tristate "CRCT10DIF digest algorithm using PMULL instructions"
index 16c5da9be9fb436912cec1a4a1fed669938261a6..70b1469783f9bd376465369d1df328f193efc642 100644 (file)
@@ -70,8 +70,6 @@ asmlinkage void pmull_gcm_decrypt(int blocks, u64 dg[], u8 dst[],
 asmlinkage void pmull_gcm_encrypt_block(u8 dst[], u8 const src[],
                                        u32 const rk[], int rounds);
 
-asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
-
 static int ghash_init(struct shash_desc *desc)
 {
        struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
@@ -309,14 +307,13 @@ static int gcm_setkey(struct crypto_aead *tfm, const u8 *inkey,
        u8 key[GHASH_BLOCK_SIZE];
        int ret;
 
-       ret = crypto_aes_expand_key(&ctx->aes_key, inkey, keylen);
+       ret = aes_expandkey(&ctx->aes_key, inkey, keylen);
        if (ret) {
                tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }
 
-       __aes_arm64_encrypt(ctx->aes_key.key_enc, key, (u8[AES_BLOCK_SIZE]){},
-                           num_rounds(&ctx->aes_key));
+       aes_encrypt(&ctx->aes_key, key, (u8[AES_BLOCK_SIZE]){});
 
        return __ghash_setkey(&ctx->ghash_key, key, sizeof(be128));
 }
@@ -467,7 +464,7 @@ static int gcm_encrypt(struct aead_request *req)
                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
-               __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
+               aes_encrypt(&ctx->aes_key, tag, iv);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
                while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -478,8 +475,7 @@ static int gcm_encrypt(struct aead_request *req)
                        int remaining = blocks;
 
                        do {
-                               __aes_arm64_encrypt(ctx->aes_key.key_enc,
-                                                   ks, iv, nrounds);
+                               aes_encrypt(&ctx->aes_key, ks, iv);
                                crypto_xor_cpy(dst, src, ks, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);
 
@@ -495,13 +491,10 @@ static int gcm_encrypt(struct aead_request *req)
                                                 walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
                if (walk.nbytes) {
-                       __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
-                                           nrounds);
+                       aes_encrypt(&ctx->aes_key, ks, iv);
                        if (walk.nbytes > AES_BLOCK_SIZE) {
                                crypto_inc(iv, AES_BLOCK_SIZE);
-                               __aes_arm64_encrypt(ctx->aes_key.key_enc,
-                                                   ks + AES_BLOCK_SIZE, iv,
-                                                   nrounds);
+                               aes_encrypt(&ctx->aes_key, ks + AES_BLOCK_SIZE, iv);
                        }
                }
        }
@@ -605,7 +598,7 @@ static int gcm_decrypt(struct aead_request *req)
                        rk = ctx->aes_key.key_enc;
                } while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
        } else {
-               __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
+               aes_encrypt(&ctx->aes_key, tag, iv);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
                while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
@@ -618,8 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
                                        pmull_ghash_update_p64);
 
                        do {
-                               __aes_arm64_encrypt(ctx->aes_key.key_enc,
-                                                   buf, iv, nrounds);
+                               aes_encrypt(&ctx->aes_key, buf, iv);
                                crypto_xor_cpy(dst, src, buf, AES_BLOCK_SIZE);
                                crypto_inc(iv, AES_BLOCK_SIZE);
 
@@ -637,11 +629,9 @@ static int gcm_decrypt(struct aead_request *req)
                                memcpy(iv2, iv, AES_BLOCK_SIZE);
                                crypto_inc(iv2, AES_BLOCK_SIZE);
 
-                               __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
-                                                   iv2, nrounds);
+                               aes_encrypt(&ctx->aes_key, iv2, iv2);
                        }
-                       __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
-                                           nrounds);
+                       aes_encrypt(&ctx->aes_key, iv, iv);
                }
        }