net/tls: don't copy negative amounts of data in reencrypt
author     Jakub Kicinski <jakub.kicinski@netronome.com>
           Fri, 26 Apr 2019 00:35:09 +0000 (17:35 -0700)
committer  David S. Miller <davem@davemloft.net>
           Sun, 28 Apr 2019 00:17:19 +0000 (20:17 -0400)
There is no guarantee that the record starts before the skb frags.
If we don't check for this condition, the copy amount goes
negative, leading to reads and writes to random memory locations.
Familiar hilarity ensues.
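
A minimal userspace sketch of the arithmetic (illustrative names only,
not the kernel code; min_t is re-implemented as a macro for the demo):

    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    /* Unguarded, as before the patch: goes negative when offset > pagelen. */
    static int copy_len(int pagelen, int offset, int payload_left)
    {
            return min_t(int, pagelen - offset, payload_left);
    }

    /* Guarded, as after the patch: skip the head copy entirely. */
    static int copy_len_fixed(int pagelen, int offset, int payload_left)
    {
            if (pagelen > offset)
                    return min_t(int, pagelen - offset, payload_left);
            return 0;
    }

    int main(void)
    {
            /* Record starts 100 bytes past the end of the paged data. */
            int pagelen = 64, offset = 164, left = 1400;

            printf("unguarded copy = %d\n", copy_len(pagelen, offset, left));
            printf("guarded   copy = %d\n", copy_len_fixed(pagelen, offset, left));
            return 0;
    }

The unguarded version yields -100; feeding that to skb_store_bits() as a
length, and then advancing offset and buf by it, is what produced the
stray reads and writes.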

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/tls/tls_device.c

index cc0256939eb63e2afef7dddb33f57583d1387cc3..96357060addc5ab2a2c9e15664e7477e5ab4b256 100644
@@ -628,14 +628,16 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
        else
                err = 0;
 
-       copy = min_t(int, skb_pagelen(skb) - offset,
-                    rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+       if (skb_pagelen(skb) > offset) {
+               copy = min_t(int, skb_pagelen(skb) - offset,
+                            rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
 
-       if (skb->decrypted)
-               skb_store_bits(skb, offset, buf, copy);
+               if (skb->decrypted)
+                       skb_store_bits(skb, offset, buf, copy);
 
-       offset += copy;
-       buf += copy;
+               offset += copy;
+               buf += copy;
+       }
 
        skb_walk_frags(skb, skb_iter) {
                copy = min_t(int, skb_iter->len,