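This diff removes dm-crypt's private ESSIV and EBOIV IV machinery and lets the kernel crypto API perform the IV encryption instead: struct iv_essiv_private, struct iv_eboiv_private, the cc->cipher field and the crypt_ctr_blkdev_cipher() parsing workaround all go away, the target constructors compose an "essiv(...)" algorithm name for the crypto layer, and EBOIV reuses the data skcipher to generate the IV.

As a rough illustration only (the sector count, device and key below are placeholders, not taken from the patch), an old-format mapping line such as

  # dm table line: <start> <length> crypt <cipher> <key> <iv_offset> <device> <offset>
  0 2097152 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sdb 0

is now translated by crypt_ctr_cipher_old() into the single crypto API spec "essiv(cbc(aes),sha256)" rather than into a separate sha256 shash plus an AES ESSIV cipher handle.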
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d5216bcc464960b4fc748415c1c8e32abc523321..f87f6495652f5966ab2fce72b252a2016b711186 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -98,11 +98,6 @@ struct crypt_iv_operations {
                    struct dm_crypt_request *dmreq);
 };
 
-struct iv_essiv_private {
-       struct crypto_shash *hash_tfm;
-       u8 *salt;
-};
-
 struct iv_benbi_private {
        int shift;
 };
@@ -120,10 +115,6 @@ struct iv_tcw_private {
        u8 *whitening;
 };
 
-struct iv_eboiv_private {
-       struct crypto_cipher *tfm;
-};
-
 /*
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
@@ -152,26 +143,21 @@ struct crypt_config {
        struct task_struct *write_thread;
        struct rb_root write_tree;
 
-       char *cipher;
        char *cipher_string;
        char *cipher_auth;
        char *key_string;
 
        const struct crypt_iv_operations *iv_gen_ops;
        union {
-               struct iv_essiv_private essiv;
                struct iv_benbi_private benbi;
                struct iv_lmk_private lmk;
                struct iv_tcw_private tcw;
-               struct iv_eboiv_private eboiv;
        } iv_gen_private;
        u64 iv_offset;
        unsigned int iv_size;
        unsigned short int sector_size;
        unsigned char sector_shift;
 
-       /* ESSIV: struct crypto_cipher *essiv_tfm */
-       void *iv_private;
        union {
                struct crypto_skcipher **tfms;
                struct crypto_aead **tfms_aead;
@@ -329,157 +315,15 @@ static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
        return 0;
 }
 
-/* Initialise ESSIV - compute salt but no local memory allocations */
-static int crypt_iv_essiv_init(struct crypt_config *cc)
-{
-       struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       SHASH_DESC_ON_STACK(desc, essiv->hash_tfm);
-       struct crypto_cipher *essiv_tfm;
-       int err;
-
-       desc->tfm = essiv->hash_tfm;
-
-       err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
-       shash_desc_zero(desc);
-       if (err)
-               return err;
-
-       essiv_tfm = cc->iv_private;
-
-       err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-                           crypto_shash_digestsize(essiv->hash_tfm));
-       if (err)
-               return err;
-
-       return 0;
-}
-
-/* Wipe salt and reset key derived from volume key */
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
-{
-       struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       unsigned salt_size = crypto_shash_digestsize(essiv->hash_tfm);
-       struct crypto_cipher *essiv_tfm;
-       int r, err = 0;
-
-       memset(essiv->salt, 0, salt_size);
-
-       essiv_tfm = cc->iv_private;
-       r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
-       if (r)
-               err = r;
-
-       return err;
-}
-
-/* Allocate the cipher for ESSIV */
-static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
-                                               struct dm_target *ti,
-                                               const u8 *salt,
-                                               unsigned int saltsize)
-{
-       struct crypto_cipher *essiv_tfm;
-       int err;
-
-       /* Setup the essiv_tfm with the given salt */
-       essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
-       if (IS_ERR(essiv_tfm)) {
-               ti->error = "Error allocating crypto tfm for ESSIV";
-               return essiv_tfm;
-       }
-
-       if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
-               ti->error = "Block size of ESSIV cipher does "
-                           "not match IV size of block cipher";
-               crypto_free_cipher(essiv_tfm);
-               return ERR_PTR(-EINVAL);
-       }
-
-       err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
-       if (err) {
-               ti->error = "Failed to set key for ESSIV cipher";
-               crypto_free_cipher(essiv_tfm);
-               return ERR_PTR(err);
-       }
-
-       return essiv_tfm;
-}
-
-static void crypt_iv_essiv_dtr(struct crypt_config *cc)
-{
-       struct crypto_cipher *essiv_tfm;
-       struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-
-       crypto_free_shash(essiv->hash_tfm);
-       essiv->hash_tfm = NULL;
-
-       kzfree(essiv->salt);
-       essiv->salt = NULL;
-
-       essiv_tfm = cc->iv_private;
-
-       if (essiv_tfm)
-               crypto_free_cipher(essiv_tfm);
-
-       cc->iv_private = NULL;
-}
-
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
-                             const char *opts)
-{
-       struct crypto_cipher *essiv_tfm = NULL;
-       struct crypto_shash *hash_tfm = NULL;
-       u8 *salt = NULL;
-       int err;
-
-       if (!opts) {
-               ti->error = "Digest algorithm missing for ESSIV mode";
-               return -EINVAL;
-       }
-
-       /* Allocate hash algorithm */
-       hash_tfm = crypto_alloc_shash(opts, 0, 0);
-       if (IS_ERR(hash_tfm)) {
-               ti->error = "Error initializing ESSIV hash";
-               err = PTR_ERR(hash_tfm);
-               goto bad;
-       }
-
-       salt = kzalloc(crypto_shash_digestsize(hash_tfm), GFP_KERNEL);
-       if (!salt) {
-               ti->error = "Error kmallocing salt storage in ESSIV";
-               err = -ENOMEM;
-               goto bad;
-       }
-
-       cc->iv_gen_private.essiv.salt = salt;
-       cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
-
-       essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
-                                      crypto_shash_digestsize(hash_tfm));
-       if (IS_ERR(essiv_tfm)) {
-               crypt_iv_essiv_dtr(cc);
-               return PTR_ERR(essiv_tfm);
-       }
-       cc->iv_private = essiv_tfm;
-
-       return 0;
-
-bad:
-       if (hash_tfm && !IS_ERR(hash_tfm))
-               crypto_free_shash(hash_tfm);
-       kfree(salt);
-       return err;
-}
-
 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
                              struct dm_crypt_request *dmreq)
 {
-       struct crypto_cipher *essiv_tfm = cc->iv_private;
-
+       /*
+        * ESSIV encryption of the IV is now handled by the crypto API,
+        * so just pass the plain sector number here.
+        */
        memset(iv, 0, cc->iv_size);
        *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
-       crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
 
        return 0;
 }
@@ -847,65 +691,47 @@ static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
        return 0;
 }
 
-static void crypt_iv_eboiv_dtr(struct crypt_config *cc)
-{
-       struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
-
-       crypto_free_cipher(eboiv->tfm);
-       eboiv->tfm = NULL;
-}
-
 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                            const char *opts)
 {
-       struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
-       struct crypto_cipher *tfm;
-
-       tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
-       if (IS_ERR(tfm)) {
-               ti->error = "Error allocating crypto tfm for EBOIV";
-               return PTR_ERR(tfm);
+       if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags)) {
+               ti->error = "AEAD transforms not supported for EBOIV";
+               return -EINVAL;
        }
 
-       if (crypto_cipher_blocksize(tfm) != cc->iv_size) {
+       if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
                ti->error = "Block size of EBOIV cipher does "
                            "not match IV size of block cipher";
-               crypto_free_cipher(tfm);
                return -EINVAL;
        }
 
-       eboiv->tfm = tfm;
        return 0;
 }
 
-static int crypt_iv_eboiv_init(struct crypt_config *cc)
+static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
+                           struct dm_crypt_request *dmreq)
 {
-       struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
+       u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
+       struct skcipher_request *req;
+       struct scatterlist src, dst;
+       struct crypto_wait wait;
        int err;
 
-       err = crypto_cipher_setkey(eboiv->tfm, cc->key, cc->key_size);
-       if (err)
-               return err;
+       req = skcipher_request_alloc(any_tfm(cc), GFP_KERNEL | GFP_NOFS);
+       if (!req)
+               return -ENOMEM;
 
-       return 0;
-}
+       memset(buf, 0, cc->iv_size);
+       *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
 
-static int crypt_iv_eboiv_wipe(struct crypt_config *cc)
-{
-       /* Called after cc->key is set to random key in crypt_wipe() */
-       return crypt_iv_eboiv_init(cc);
-}
+       sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
+       sg_init_one(&dst, iv, cc->iv_size);
+       skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
+       skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+       err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       skcipher_request_free(req);
 
-static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
-                           struct dm_crypt_request *dmreq)
-{
-       struct iv_eboiv_private *eboiv = &cc->iv_gen_private.eboiv;
-
-       memset(iv, 0, cc->iv_size);
-       *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
-       crypto_cipher_encrypt_one(eboiv->tfm, iv, iv);
-
-       return 0;
+       return err;
 }
 
 static const struct crypt_iv_operations crypt_iv_plain_ops = {
@@ -921,10 +747,6 @@ static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
 };
 
 static const struct crypt_iv_operations crypt_iv_essiv_ops = {
-       .ctr       = crypt_iv_essiv_ctr,
-       .dtr       = crypt_iv_essiv_dtr,
-       .init      = crypt_iv_essiv_init,
-       .wipe      = crypt_iv_essiv_wipe,
        .generator = crypt_iv_essiv_gen
 };
 
@@ -962,9 +784,6 @@ static struct crypt_iv_operations crypt_iv_random_ops = {
 
 static struct crypt_iv_operations crypt_iv_eboiv_ops = {
        .ctr       = crypt_iv_eboiv_ctr,
-       .dtr       = crypt_iv_eboiv_dtr,
-       .init      = crypt_iv_eboiv_init,
-       .wipe      = crypt_iv_eboiv_wipe,
        .generator = crypt_iv_eboiv_gen
 };
 
@@ -2320,7 +2139,6 @@ static void crypt_dtr(struct dm_target *ti)
        if (cc->dev)
                dm_put_device(ti, cc->dev);
 
-       kzfree(cc->cipher);
        kzfree(cc->cipher_string);
        kzfree(cc->key_string);
        kzfree(cc->cipher_auth);
@@ -2401,52 +2219,6 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
        return 0;
 }
 
-/*
- * Workaround to parse cipher algorithm from crypto API spec.
- * The cc->cipher is currently used only in ESSIV.
- * This should be probably done by crypto-api calls (once available...)
- */
-static int crypt_ctr_blkdev_cipher(struct crypt_config *cc)
-{
-       const char *alg_name = NULL;
-       char *start, *end;
-
-       if (crypt_integrity_aead(cc)) {
-               alg_name = crypto_tfm_alg_name(crypto_aead_tfm(any_tfm_aead(cc)));
-               if (!alg_name)
-                       return -EINVAL;
-               if (crypt_integrity_hmac(cc)) {
-                       alg_name = strchr(alg_name, ',');
-                       if (!alg_name)
-                               return -EINVAL;
-               }
-               alg_name++;
-       } else {
-               alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(any_tfm(cc)));
-               if (!alg_name)
-                       return -EINVAL;
-       }
-
-       start = strchr(alg_name, '(');
-       end = strchr(alg_name, ')');
-
-       if (!start && !end) {
-               cc->cipher = kstrdup(alg_name, GFP_KERNEL);
-               return cc->cipher ? 0 : -ENOMEM;
-       }
-
-       if (!start || !end || ++start >= end)
-               return -EINVAL;
-
-       cc->cipher = kzalloc(end - start + 1, GFP_KERNEL);
-       if (!cc->cipher)
-               return -ENOMEM;
-
-       strncpy(cc->cipher, start, end - start);
-
-       return 0;
-}
-
 /*
  * Workaround to parse HMAC algorithm from AEAD crypto API spec.
  * The HMAC is needed to calculate tag size (HMAC digest size).
@@ -2490,7 +2262,7 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
                                char **ivmode, char **ivopts)
 {
        struct crypt_config *cc = ti->private;
-       char *tmp, *cipher_api;
+       char *tmp, *cipher_api, buf[CRYPTO_MAX_ALG_NAME];
        int ret = -EINVAL;
 
        cc->tfms_count = 1;
@@ -2516,9 +2288,32 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
        /* The rest is crypto API spec */
        cipher_api = tmp;
 
+       /* Alloc AEAD, can be used only in new format. */
+       if (crypt_integrity_aead(cc)) {
+               ret = crypt_ctr_auth_cipher(cc, cipher_api);
+               if (ret < 0) {
+                       ti->error = "Invalid AEAD cipher spec";
+                       return -ENOMEM;
+               }
+       }
+
        if (*ivmode && !strcmp(*ivmode, "lmk"))
                cc->tfms_count = 64;
 
+       if (*ivmode && !strcmp(*ivmode, "essiv")) {
+               if (!*ivopts) {
+                       ti->error = "Digest algorithm missing for ESSIV mode";
+                       return -EINVAL;
+               }
+               ret = snprintf(buf, CRYPTO_MAX_ALG_NAME, "essiv(%s,%s)",
+                              cipher_api, *ivopts);
+               if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
+                       ti->error = "Cannot allocate cipher string";
+                       return -ENOMEM;
+               }
+               cipher_api = buf;
+       }
+
        cc->key_parts = cc->tfms_count;
 
        /* Allocate cipher */
@@ -2528,23 +2323,11 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
                return ret;
        }
 
-       /* Alloc AEAD, can be used only in new format. */
-       if (crypt_integrity_aead(cc)) {
-               ret = crypt_ctr_auth_cipher(cc, cipher_api);
-               if (ret < 0) {
-                       ti->error = "Invalid AEAD cipher spec";
-                       return -ENOMEM;
-               }
+       if (crypt_integrity_aead(cc))
                cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
-       } else
+       else
                cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
 
-       ret = crypt_ctr_blkdev_cipher(cc);
-       if (ret < 0) {
-               ti->error = "Cannot allocate cipher string";
-               return -ENOMEM;
-       }
-
        return 0;
 }
 
@@ -2579,10 +2362,6 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
        }
        cc->key_parts = cc->tfms_count;
 
-       cc->cipher = kstrdup(cipher, GFP_KERNEL);
-       if (!cc->cipher)
-               goto bad_mem;
-
        chainmode = strsep(&tmp, "-");
        *ivmode = strsep(&tmp, ":");
        *ivopts = tmp;
@@ -2605,9 +2384,19 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
        if (!cipher_api)
                goto bad_mem;
 
-       ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
-                      "%s(%s)", chainmode, cipher);
-       if (ret < 0) {
+       if (*ivmode && !strcmp(*ivmode, "essiv")) {
+               if (!*ivopts) {
+                       ti->error = "Digest algorithm missing for ESSIV mode";
+                       kfree(cipher_api);
+                       return -EINVAL;
+               }
+               ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+                              "essiv(%s(%s),%s)", chainmode, cipher, *ivopts);
+       } else {
+               ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
+                              "%s(%s)", chainmode, cipher);
+       }
+       if (ret < 0 || ret >= CRYPTO_MAX_ALG_NAME) {
                kfree(cipher_api);
                goto bad_mem;
        }