tls: convert to generic sk_msg interface
author Daniel Borkmann <daniel@iogearbox.net>
Sat, 13 Oct 2018 00:45:59 +0000 (02:45 +0200)
committer Alexei Starovoitov <ast@kernel.org>
Mon, 15 Oct 2018 19:23:19 +0000 (12:23 -0700)
Convert kTLS over to make use of the sk_msg interface for plaintext and
encrypted scatter-gather data, so that it reuses all the sk_msg helpers
and data structures. This in turn enables gluing kTLS to BPF in a
second step later on.

This also allows us to remove quite a few open-coded helpers that are
covered by the sk_msg API. Recent kTLS changes 80ece6a03aaf ("tls:
Remove redundant vars from tls record structure") and 4e6d47206c32
("tls: Add support for inplace records encryption") changed the data
path handling a bit; while we've kept the latter optimization intact,
we had to undo the former change to better fit the sk_msg model, hence
sg_aead_in and sg_aead_out have been brought back and are linked into
the sk_msg sgs. The kTLS record now contains a msg_plaintext and a
msg_encrypted sk_msg.
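To illustrate the linkage (a minimal sketch, not lifted verbatim from
the patch): slot 0 of the two-entry sg_aead_in/sg_aead_out arrays holds
the AAD, and slot 1 chains into the head of the corresponding sk_msg's
sg list, much as get_rec() and tls_push_record() below wire it up:

    /* Assuming a struct tls_rec *rec as allocated in get_rec(). */
    struct sk_msg *msg_pl = &rec->msg_plaintext;

    sg_init_table(rec->sg_aead_in, 2);
    /* Slot 0 carries the AAD buffer for the AEAD request. */
    sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
               sizeof(rec->aad_space));
    /* Slot 1 is a chain entry, not data; clear any stale end mark. */
    sg_unmark_end(&rec->sg_aead_in[1]);
    /* Link the AAD header in front of the plaintext payload sgs. */
    sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[msg_pl->sg.start]);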

In the original code, zerocopy_from_iter() was used in both the TX and
the RX path. For the strparser skb-based RX path, we've left the
zerocopy_from_iter() logic in decrypt_internal() mostly untouched,
meaning it has been moved into tls_setup_from_iter() with the charging
logic removed (as it is not used from RX). Given the RX path is not
based on sk_msg objects, we haven't pursued setting up a dummy sk_msg
to call into sk_msg_zerocopy_from_iter(), but this could be an option
to pursue in a later step.
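
Should that later step be taken, a rough sketch (hypothetical, not part
of this patch) would be to initialize a throwaway sk_msg and feed it to
the same helper the TX path uses; sk, out_iov and data_len stand in for
the decrypt_internal() context:

    struct sk_msg msg_rx;
    int err;

    sk_msg_init(&msg_rx);
    /* Pin the user pages from out_iov into msg_rx's sg list. */
    err = sk_msg_zerocopy_from_iter(sk, out_iov, &msg_rx, data_len);
    if (err < 0)
            return err; /* or fall back to copying into skb buffers */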

Joint work with John.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/skmsg.h
include/net/sock.h
include/net/tls.h
net/core/skmsg.c
net/core/sock.c
net/tls/Kconfig
net/tls/tls_device.c
net/tls/tls_sw.c

index 95678103c4a050380116def5472fc391c882d64a..4e84b3c2eff8ac23d4dfd49afefac67bc4c0d8fc 100644
@@ -102,6 +102,8 @@ struct sk_psock {
 
 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce);
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+                u32 off, u32 len);
 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
 int sk_msg_free(struct sock *sk, struct sk_msg *msg);
 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
index 751549ac0a849144ab0382203ee5c877374523e2..7470c45d182d9115d7ce781e61aad9433c782cbd 100644
@@ -2214,10 +2214,6 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
 
 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
-               int sg_start, int *sg_curr, unsigned int *sg_size,
-               int first_coalesce);
-
 /*
  *     Default write policy as shown to user space via poll/select/SIGIO
  */
index 5e853835597e7f8fee2a61052bc58e16b0f003b6..3d22d8a59be70074b14acac27a34272678f4f33c 100644
@@ -39,6 +39,8 @@
 #include <linux/crypto.h>
 #include <linux/socket.h>
 #include <linux/tcp.h>
+#include <linux/skmsg.h>
+
 #include <net/tcp.h>
 #include <net/strparser.h>
 #include <crypto/aead.h>
@@ -103,15 +105,13 @@ struct tls_rec {
        int tx_flags;
        int inplace_crypto;
 
-       /* AAD | sg_plaintext_data | sg_tag */
-       struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS + 1];
-       /* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
-       struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS + 1];
+       struct sk_msg msg_plaintext;
+       struct sk_msg msg_encrypted;
 
-       unsigned int sg_plaintext_size;
-       unsigned int sg_encrypted_size;
-       int sg_plaintext_num_elem;
-       int sg_encrypted_num_elem;
+       /* AAD | msg_plaintext.sg.data | sg_tag */
+       struct scatterlist sg_aead_in[2];
+       /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
+       struct scatterlist sg_aead_out[2];
 
        char aad_space[TLS_AAD_SPACE_SIZE];
        struct aead_request aead_req;
@@ -223,8 +223,8 @@ struct tls_context {
 
        unsigned long flags;
        bool in_tcp_sendpages;
+       bool pending_open_record_frags;
 
-       u16 pending_open_record_frags;
        int (*push_pending_record)(struct sock *sk, int flags);
 
        void (*sk_write_space)(struct sock *sk);
index ae2b281c9c57bc6dd85e211e967829a81c6da306..56a99d0c9aa08db3bc294e129aa571325bd335cb 100644
@@ -73,6 +73,45 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
 }
 EXPORT_SYMBOL_GPL(sk_msg_alloc);
 
+int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
+                u32 off, u32 len)
+{
+       int i = src->sg.start;
+       struct scatterlist *sge = sk_msg_elem(src, i);
+       u32 sge_len, sge_off;
+
+       if (sk_msg_full(dst))
+               return -ENOSPC;
+
+       while (off) {
+               if (sge->length > off)
+                       break;
+               off -= sge->length;
+               sk_msg_iter_var_next(i);
+               if (i == src->sg.end && off)
+                       return -ENOSPC;
+               sge = sk_msg_elem(src, i);
+       }
+
+       while (len) {
+               sge_len = sge->length - off;
+               sge_off = sge->offset + off;
+               if (sge_len > len)
+                       sge_len = len;
+               off = 0;
+               len -= sge_len;
+               sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
+               sk_mem_charge(sk, sge_len);
+               sk_msg_iter_var_next(i);
+               if (i == src->sg.end && len)
+                       return -ENOSPC;
+               sge = sk_msg_elem(src, i);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(sk_msg_clone);
+
 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
 {
        int i = msg->sg.start;
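
A usage sketch for the new helper (dst, src, off and len are assumed
caller context, as in tls_clone_plaintext_msg() further down):
sk_msg_clone() makes dst reference the same pages as src for the byte
range [off, off + len), taking page references via sk_msg_page_add()
and charging the socket via sk_mem_charge(), without copying any data:

    int err;

    err = sk_msg_clone(sk, dst, src, off, len);
    /* -ENOSPC: dst is full, or src holds fewer than off + len bytes. */
    if (err)
            return err;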
index 7e8796a6a0892efbb7dfce67d12b8062b2d5daa9..52e4f1c16b1e5870a1992665ba1f5798bdf0025b 100644
@@ -2238,67 +2238,6 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
 }
 EXPORT_SYMBOL(sk_page_frag_refill);
 
-int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
-               int sg_start, int *sg_curr_index, unsigned int *sg_curr_size,
-               int first_coalesce)
-{
-       int sg_curr = *sg_curr_index, use = 0, rc = 0;
-       unsigned int size = *sg_curr_size;
-       struct page_frag *pfrag;
-       struct scatterlist *sge;
-
-       len -= size;
-       pfrag = sk_page_frag(sk);
-
-       while (len > 0) {
-               unsigned int orig_offset;
-
-               if (!sk_page_frag_refill(sk, pfrag)) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-
-               use = min_t(int, len, pfrag->size - pfrag->offset);
-
-               if (!sk_wmem_schedule(sk, use)) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-
-               sk_mem_charge(sk, use);
-               size += use;
-               orig_offset = pfrag->offset;
-               pfrag->offset += use;
-
-               sge = sg + sg_curr - 1;
-               if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
-                   sge->offset + sge->length == orig_offset) {
-                       sge->length += use;
-               } else {
-                       sge = sg + sg_curr;
-                       sg_unmark_end(sge);
-                       sg_set_page(sge, pfrag->page, use, orig_offset);
-                       get_page(pfrag->page);
-                       sg_curr++;
-
-                       if (sg_curr == MAX_SKB_FRAGS)
-                               sg_curr = 0;
-
-                       if (sg_curr == sg_start) {
-                               rc = -ENOSPC;
-                               break;
-                       }
-               }
-
-               len -= use;
-       }
-out:
-       *sg_curr_size = size;
-       *sg_curr_index = sg_curr;
-       return rc;
-}
-EXPORT_SYMBOL(sk_alloc_sg);
-
 static void __lock_sock(struct sock *sk)
        __releases(&sk->sk_lock.slock)
        __acquires(&sk->sk_lock.slock)
index 73f05ece53d0c955df2bac6d043e1e354d8fb7dc..99c1a19c17b1dc7dba66c1f6791f7b8e61a733e9 100644
@@ -8,6 +8,7 @@ config TLS
        select CRYPTO_AES
        select CRYPTO_GCM
        select STREAM_PARSER
+       select NET_SOCK_MSG
        default n
        ---help---
        Enable kernel support for TLS protocol. This allows symmetric
index 961b07d4d41ca7a677b1fd0d24f490888d784e0a..276edbc04f3859efe96540e8e4705e730bcba56f 100644
@@ -421,7 +421,7 @@ static int tls_push_data(struct sock *sk,
                        tls_push_record_flags = flags;
                        if (more) {
                                tls_ctx->pending_open_record_frags =
-                                               record->num_frags;
+                                               !!record->num_frags;
                                break;
                        }
 
index aa9fdce272b62c7fd60808e1b5c416f591e7ce64..5043b0be1448955e8269c6d3b0c7bb552458acef 100644
@@ -213,153 +213,49 @@ static int tls_do_decryption(struct sock *sk,
        return ret;
 }
 
-static void trim_sg(struct sock *sk, struct scatterlist *sg,
-                   int *sg_num_elem, unsigned int *sg_size, int target_size)
-{
-       int i = *sg_num_elem - 1;
-       int trim = *sg_size - target_size;
-
-       if (trim <= 0) {
-               WARN_ON(trim < 0);
-               return;
-       }
-
-       *sg_size = target_size;
-       while (trim >= sg[i].length) {
-               trim -= sg[i].length;
-               sk_mem_uncharge(sk, sg[i].length);
-               put_page(sg_page(&sg[i]));
-               i--;
-
-               if (i < 0)
-                       goto out;
-       }
-
-       sg[i].length -= trim;
-       sk_mem_uncharge(sk, trim);
-
-out:
-       *sg_num_elem = i + 1;
-}
-
-static void trim_both_sgl(struct sock *sk, int target_size)
+static void tls_trim_both_msgs(struct sock *sk, int target_size)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
 
-       trim_sg(sk, &rec->sg_plaintext_data[1],
-               &rec->sg_plaintext_num_elem,
-               &rec->sg_plaintext_size,
-               target_size);
-
+       sk_msg_trim(sk, &rec->msg_plaintext, target_size);
        if (target_size > 0)
                target_size += tls_ctx->tx.overhead_size;
-
-       trim_sg(sk, &rec->sg_encrypted_data[1],
-               &rec->sg_encrypted_num_elem,
-               &rec->sg_encrypted_size,
-               target_size);
+       sk_msg_trim(sk, &rec->msg_encrypted, target_size);
 }
 
-static int alloc_encrypted_sg(struct sock *sk, int len)
+static int tls_alloc_encrypted_msg(struct sock *sk, int len)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
-       int rc = 0;
-
-       rc = sk_alloc_sg(sk, len,
-                        &rec->sg_encrypted_data[1], 0,
-                        &rec->sg_encrypted_num_elem,
-                        &rec->sg_encrypted_size, 0);
-
-       if (rc == -ENOSPC)
-               rec->sg_encrypted_num_elem =
-                       ARRAY_SIZE(rec->sg_encrypted_data) - 1;
+       struct sk_msg *msg_en = &rec->msg_encrypted;
 
-       return rc;
+       return sk_msg_alloc(sk, msg_en, len, 0);
 }
 
-static int move_to_plaintext_sg(struct sock *sk, int required_size)
+static int tls_clone_plaintext_msg(struct sock *sk, int required)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
-       struct scatterlist *plain_sg = &rec->sg_plaintext_data[1];
-       struct scatterlist *enc_sg = &rec->sg_encrypted_data[1];
-       int enc_sg_idx = 0;
+       struct sk_msg *msg_pl = &rec->msg_plaintext;
+       struct sk_msg *msg_en = &rec->msg_encrypted;
        int skip, len;
 
-       if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
-               return -ENOSPC;
-
-       /* We add page references worth len bytes from enc_sg at the
-        * end of plain_sg. It is guaranteed that sg_encrypted_data
+       /* We add page references worth len bytes from encrypted sg
+        * at the end of plaintext sg. It is guaranteed that msg_en
         * has enough required room (ensured by caller).
         */
-       len = required_size - rec->sg_plaintext_size;
+       len = required - msg_pl->sg.size;
 
-       /* Skip initial bytes in sg_encrypted_data to be able
-        * to use same offset of both plain and encrypted data.
+       /* Skip initial bytes in msg_en's data to be able to use
+        * same offset of both plain and encrypted data.
         */
-       skip = tls_ctx->tx.prepend_size + rec->sg_plaintext_size;
-
-       while (enc_sg_idx < rec->sg_encrypted_num_elem) {
-               if (enc_sg[enc_sg_idx].length > skip)
-                       break;
-
-               skip -= enc_sg[enc_sg_idx].length;
-               enc_sg_idx++;
-       }
+       skip = tls_ctx->tx.prepend_size + msg_pl->sg.size;
 
-       /* unmark the end of plain_sg*/
-       sg_unmark_end(plain_sg + rec->sg_plaintext_num_elem - 1);
-
-       while (len) {
-               struct page *page = sg_page(&enc_sg[enc_sg_idx]);
-               int bytes = enc_sg[enc_sg_idx].length - skip;
-               int offset = enc_sg[enc_sg_idx].offset + skip;
-
-               if (bytes > len)
-                       bytes = len;
-               else
-                       enc_sg_idx++;
-
-               /* Skipping is required only one time */
-               skip = 0;
-
-               /* Increment page reference */
-               get_page(page);
-
-               sg_set_page(&plain_sg[rec->sg_plaintext_num_elem], page,
-                           bytes, offset);
-
-               sk_mem_charge(sk, bytes);
-
-               len -= bytes;
-               rec->sg_plaintext_size += bytes;
-
-               rec->sg_plaintext_num_elem++;
-
-               if (rec->sg_plaintext_num_elem == MAX_SKB_FRAGS)
-                       return -ENOSPC;
-       }
-
-       return 0;
-}
-
-static void free_sg(struct sock *sk, struct scatterlist *sg,
-                   int *sg_num_elem, unsigned int *sg_size)
-{
-       int i, n = *sg_num_elem;
-
-       for (i = 0; i < n; ++i) {
-               sk_mem_uncharge(sk, sg[i].length);
-               put_page(sg_page(&sg[i]));
-       }
-       *sg_num_elem = 0;
-       *sg_size = 0;
+       return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
 }
 
 static void tls_free_open_rec(struct sock *sk)
@@ -372,14 +268,8 @@ static void tls_free_open_rec(struct sock *sk)
        if (!rec)
                return;
 
-       free_sg(sk, &rec->sg_encrypted_data[1],
-               &rec->sg_encrypted_num_elem,
-               &rec->sg_encrypted_size);
-
-       free_sg(sk, &rec->sg_plaintext_data[1],
-               &rec->sg_plaintext_num_elem,
-               &rec->sg_plaintext_size);
-
+       sk_msg_free(sk, &rec->msg_encrypted);
+       sk_msg_free(sk, &rec->msg_plaintext);
        kfree(rec);
 }
 
@@ -388,6 +278,7 @@ int tls_tx_records(struct sock *sk, int flags)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec, *tmp;
+       struct sk_msg *msg_en;
        int tx_flags, rc = 0;
 
        if (tls_is_partially_sent_record(tls_ctx)) {
@@ -407,9 +298,7 @@ int tls_tx_records(struct sock *sk, int flags)
                 * Remove the head of tx_list
                 */
                list_del(&rec->list);
-               free_sg(sk, &rec->sg_plaintext_data[1],
-                       &rec->sg_plaintext_num_elem, &rec->sg_plaintext_size);
-
+               sk_msg_free(sk, &rec->msg_plaintext);
                kfree(rec);
        }
 
@@ -421,17 +310,15 @@ int tls_tx_records(struct sock *sk, int flags)
                        else
                                tx_flags = flags;
 
+                       msg_en = &rec->msg_encrypted;
                        rc = tls_push_sg(sk, tls_ctx,
-                                        &rec->sg_encrypted_data[1],
+                                        &msg_en->sg.data[msg_en->sg.curr],
                                         0, tx_flags);
                        if (rc)
                                goto tx_err;
 
                        list_del(&rec->list);
-                       free_sg(sk, &rec->sg_plaintext_data[1],
-                               &rec->sg_plaintext_num_elem,
-                               &rec->sg_plaintext_size);
-
+                       sk_msg_free(sk, &rec->msg_plaintext);
                        kfree(rec);
                } else {
                        break;
@@ -451,15 +338,18 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
        struct sock *sk = req->data;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+       struct scatterlist *sge;
+       struct sk_msg *msg_en;
        struct tls_rec *rec;
        bool ready = false;
        int pending;
 
        rec = container_of(aead_req, struct tls_rec, aead_req);
+       msg_en = &rec->msg_encrypted;
 
-       rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
-       rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
-
+       sge = sk_msg_elem(msg_en, msg_en->sg.curr);
+       sge->offset -= tls_ctx->tx.prepend_size;
+       sge->length += tls_ctx->tx.prepend_size;
 
        /* Check if error is previously set on socket */
        if (err || sk->sk_err) {
@@ -497,31 +387,29 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 
        /* Schedule the transmission */
        if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
-               schedule_delayed_work(&ctx->tx_work.work, 2);
+               schedule_delayed_work(&ctx->tx_work.work, 1);
 }
 
 static int tls_do_encryption(struct sock *sk,
                             struct tls_context *tls_ctx,
                             struct tls_sw_context_tx *ctx,
                             struct aead_request *aead_req,
-                            size_t data_len)
+                            size_t data_len, u32 start)
 {
        struct tls_rec *rec = ctx->open_rec;
-       struct scatterlist *plain_sg = rec->sg_plaintext_data;
-       struct scatterlist *enc_sg = rec->sg_encrypted_data;
+       struct sk_msg *msg_en = &rec->msg_encrypted;
+       struct scatterlist *sge = sk_msg_elem(msg_en, start);
        int rc;
 
-       /* Skip the first index as it contains AAD data */
-       rec->sg_encrypted_data[1].offset += tls_ctx->tx.prepend_size;
-       rec->sg_encrypted_data[1].length -= tls_ctx->tx.prepend_size;
+       sge->offset += tls_ctx->tx.prepend_size;
+       sge->length -= tls_ctx->tx.prepend_size;
 
-       /* If it is inplace crypto, then pass same SG list as both src, dst */
-       if (rec->inplace_crypto)
-               plain_sg = enc_sg;
+       msg_en->sg.curr = start;
 
        aead_request_set_tfm(aead_req, ctx->aead_send);
        aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
-       aead_request_set_crypt(aead_req, plain_sg, enc_sg,
+       aead_request_set_crypt(aead_req, rec->sg_aead_in,
+                              rec->sg_aead_out,
                               data_len, tls_ctx->tx.iv);
 
        aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
@@ -534,8 +422,8 @@ static int tls_do_encryption(struct sock *sk,
        rc = crypto_aead_encrypt(aead_req);
        if (!rc || rc != -EINPROGRESS) {
                atomic_dec(&ctx->encrypt_pending);
-               rec->sg_encrypted_data[1].offset -= tls_ctx->tx.prepend_size;
-               rec->sg_encrypted_data[1].length += tls_ctx->tx.prepend_size;
+               sge->offset -= tls_ctx->tx.prepend_size;
+               sge->length += tls_ctx->tx.prepend_size;
        }
 
        if (!rc) {
@@ -557,35 +445,50 @@ static int tls_push_record(struct sock *sk, int flags,
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        struct tls_rec *rec = ctx->open_rec;
+       struct sk_msg *msg_pl, *msg_en;
        struct aead_request *req;
        int rc;
+       u32 i;
 
        if (!rec)
                return 0;
 
+       msg_pl = &rec->msg_plaintext;
+       msg_en = &rec->msg_encrypted;
+
        rec->tx_flags = flags;
        req = &rec->aead_req;
 
-       sg_mark_end(rec->sg_plaintext_data + rec->sg_plaintext_num_elem);
-       sg_mark_end(rec->sg_encrypted_data + rec->sg_encrypted_num_elem);
+       i = msg_pl->sg.end;
+       sk_msg_iter_var_prev(i);
+       sg_mark_end(sk_msg_elem(msg_pl, i));
 
-       tls_make_aad(rec->aad_space, rec->sg_plaintext_size,
+       i = msg_pl->sg.start;
+       sg_chain(rec->sg_aead_in, 2, rec->inplace_crypto ?
+                &msg_en->sg.data[i] : &msg_pl->sg.data[i]);
+
+       i = msg_en->sg.end;
+       sk_msg_iter_var_prev(i);
+       sg_mark_end(sk_msg_elem(msg_en, i));
+
+       i = msg_en->sg.start;
+       sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
+
+       tls_make_aad(rec->aad_space, msg_pl->sg.size,
                     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
                     record_type);
 
        tls_fill_prepend(tls_ctx,
-                        page_address(sg_page(&rec->sg_encrypted_data[1])) +
-                        rec->sg_encrypted_data[1].offset,
-                        rec->sg_plaintext_size, record_type);
+                        page_address(sg_page(&msg_en->sg.data[i])) +
+                        msg_en->sg.data[i].offset, msg_pl->sg.size,
+                        record_type);
 
-       tls_ctx->pending_open_record_frags = 0;
-
-       rc = tls_do_encryption(sk, tls_ctx, ctx, req, rec->sg_plaintext_size);
-       if (rc == -EINPROGRESS)
-               return -EINPROGRESS;
+       tls_ctx->pending_open_record_frags = false;
 
+       rc = tls_do_encryption(sk, tls_ctx, ctx, req, msg_pl->sg.size, i);
        if (rc < 0) {
-               tls_err_abort(sk, EBADMSG);
+               if (rc != -EINPROGRESS)
+                       tls_err_abort(sk, EBADMSG);
                return rc;
        }
 
@@ -597,104 +500,11 @@ static int tls_sw_push_pending_record(struct sock *sk, int flags)
        return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
 }
 
-static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
-                             int length, int *pages_used,
-                             unsigned int *size_used,
-                             struct scatterlist *to, int to_max_pages,
-                             bool charge)
-{
-       struct page *pages[MAX_SKB_FRAGS];
-
-       size_t offset;
-       ssize_t copied, use;
-       int i = 0;
-       unsigned int size = *size_used;
-       int num_elem = *pages_used;
-       int rc = 0;
-       int maxpages;
-
-       while (length > 0) {
-               i = 0;
-               maxpages = to_max_pages - num_elem;
-               if (maxpages == 0) {
-                       rc = -EFAULT;
-                       goto out;
-               }
-               copied = iov_iter_get_pages(from, pages,
-                                           length,
-                                           maxpages, &offset);
-               if (copied <= 0) {
-                       rc = -EFAULT;
-                       goto out;
-               }
-
-               iov_iter_advance(from, copied);
-
-               length -= copied;
-               size += copied;
-               while (copied) {
-                       use = min_t(int, copied, PAGE_SIZE - offset);
-
-                       sg_set_page(&to[num_elem],
-                                   pages[i], use, offset);
-                       sg_unmark_end(&to[num_elem]);
-                       if (charge)
-                               sk_mem_charge(sk, use);
-
-                       offset = 0;
-                       copied -= use;
-
-                       ++i;
-                       ++num_elem;
-               }
-       }
-
-       /* Mark the end in the last sg entry if newly added */
-       if (num_elem > *pages_used)
-               sg_mark_end(&to[num_elem - 1]);
-out:
-       if (rc)
-               iov_iter_revert(from, size - *size_used);
-       *size_used = size;
-       *pages_used = num_elem;
-
-       return rc;
-}
-
-static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
-                            int bytes)
-{
-       struct tls_context *tls_ctx = tls_get_ctx(sk);
-       struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-       struct tls_rec *rec = ctx->open_rec;
-       struct scatterlist *sg = &rec->sg_plaintext_data[1];
-       int copy, i, rc = 0;
-
-       for (i = tls_ctx->pending_open_record_frags;
-            i < rec->sg_plaintext_num_elem; ++i) {
-               copy = sg[i].length;
-               if (copy_from_iter(
-                               page_address(sg_page(&sg[i])) + sg[i].offset,
-                               copy, from) != copy) {
-                       rc = -EFAULT;
-                       goto out;
-               }
-               bytes -= copy;
-
-               ++tls_ctx->pending_open_record_frags;
-
-               if (!bytes)
-                       break;
-       }
-
-out:
-       return rc;
-}
-
 static struct tls_rec *get_rec(struct sock *sk)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+       struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int mem_size;
 
@@ -708,15 +518,21 @@ static struct tls_rec *get_rec(struct sock *sk)
        if (!rec)
                return NULL;
 
-       sg_init_table(&rec->sg_plaintext_data[0],
-                     ARRAY_SIZE(rec->sg_plaintext_data));
-       sg_init_table(&rec->sg_encrypted_data[0],
-                     ARRAY_SIZE(rec->sg_encrypted_data));
+       msg_pl = &rec->msg_plaintext;
+       msg_en = &rec->msg_encrypted;
+
+       sk_msg_init(msg_pl);
+       sk_msg_init(msg_en);
 
-       sg_set_buf(&rec->sg_plaintext_data[0], rec->aad_space,
+       sg_init_table(rec->sg_aead_in, 2);
+       sg_set_buf(&rec->sg_aead_in[0], rec->aad_space,
                   sizeof(rec->aad_space));
-       sg_set_buf(&rec->sg_encrypted_data[0], rec->aad_space,
+       sg_unmark_end(&rec->sg_aead_in[1]);
+
+       sg_init_table(rec->sg_aead_out, 2);
+       sg_set_buf(&rec->sg_aead_out[0], rec->aad_space,
                   sizeof(rec->aad_space));
+       sg_unmark_end(&rec->sg_aead_out[1]);
 
        ctx->open_rec = rec;
        rec->inplace_crypto = 1;
@@ -735,6 +551,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        bool is_kvec = msg->msg_iter.type & ITER_KVEC;
        bool eor = !(msg->msg_flags & MSG_MORE);
        size_t try_to_copy, copied = 0;
+       struct sk_msg *msg_pl, *msg_en;
        struct tls_rec *rec;
        int required_size;
        int num_async = 0;
@@ -778,23 +595,26 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                        goto send_end;
                }
 
-               orig_size = rec->sg_plaintext_size;
+               msg_pl = &rec->msg_plaintext;
+               msg_en = &rec->msg_encrypted;
+
+               orig_size = msg_pl->sg.size;
                full_record = false;
                try_to_copy = msg_data_left(msg);
-               record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
+               record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
                if (try_to_copy >= record_room) {
                        try_to_copy = record_room;
                        full_record = true;
                }
 
-               required_size = rec->sg_plaintext_size + try_to_copy +
+               required_size = msg_pl->sg.size + try_to_copy +
                                tls_ctx->tx.overhead_size;
 
                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
 
 alloc_encrypted:
-               ret = alloc_encrypted_sg(sk, required_size);
+               ret = tls_alloc_encrypted_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;
@@ -803,17 +623,13 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
-                       try_to_copy -= required_size - rec->sg_encrypted_size;
+                       try_to_copy -= required_size - msg_en->sg.size;
                        full_record = true;
                }
 
                if (!is_kvec && (full_record || eor) && !async_capable) {
-                       ret = zerocopy_from_iter(sk, &msg->msg_iter,
-                               try_to_copy, &rec->sg_plaintext_num_elem,
-                               &rec->sg_plaintext_size,
-                               &rec->sg_plaintext_data[1],
-                               ARRAY_SIZE(rec->sg_plaintext_data) - 1,
-                               true);
+                       ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
+                                                       msg_pl, try_to_copy);
                        if (ret)
                                goto fallback_to_reg_send;
 
@@ -831,15 +647,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                        continue;
 
 fallback_to_reg_send:
-                       trim_sg(sk, &rec->sg_plaintext_data[1],
-                               &rec->sg_plaintext_num_elem,
-                               &rec->sg_plaintext_size,
-                               orig_size);
+                       sk_msg_trim(sk, msg_pl, orig_size);
                }
 
-               required_size = rec->sg_plaintext_size + try_to_copy;
+               required_size = msg_pl->sg.size + try_to_copy;
 
-               ret = move_to_plaintext_sg(sk, required_size);
+               ret = tls_clone_plaintext_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto send_end;
@@ -848,20 +661,21 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
-                       try_to_copy -= required_size - rec->sg_plaintext_size;
+                       try_to_copy -= required_size - msg_pl->sg.size;
                        full_record = true;
-
-                       trim_sg(sk, &rec->sg_encrypted_data[1],
-                               &rec->sg_encrypted_num_elem,
-                               &rec->sg_encrypted_size,
-                               rec->sg_plaintext_size +
-                               tls_ctx->tx.overhead_size);
+                       sk_msg_trim(sk, msg_en, msg_pl->sg.size +
+                                   tls_ctx->tx.overhead_size);
                }
 
-               ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
-               if (ret)
+               ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_pl,
+                                              try_to_copy);
+               if (ret < 0)
                        goto trim_sgl;
 
+               /* Open records defined only if successfully copied, otherwise
+                * we would trim the sg but not reset the open record frags.
+                */
+               tls_ctx->pending_open_record_frags = true;
                copied += try_to_copy;
                if (full_record || eor) {
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
@@ -881,11 +695,11 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
 trim_sgl:
-                       trim_both_sgl(sk, orig_size);
+                       tls_trim_both_msgs(sk, orig_size);
                        goto send_end;
                }
 
-               if (rec->sg_encrypted_size < required_size)
+               if (msg_en->sg.size < required_size)
                        goto alloc_encrypted;
        }
 
@@ -929,7 +743,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
        struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
        size_t orig_size = size;
-       struct scatterlist *sg;
+       struct sk_msg *msg_pl;
        struct tls_rec *rec;
        int num_async = 0;
        bool full_record;
@@ -970,20 +784,23 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
                        goto sendpage_end;
                }
 
+               msg_pl = &rec->msg_plaintext;
+
                full_record = false;
-               record_room = TLS_MAX_PAYLOAD_SIZE - rec->sg_plaintext_size;
+               record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
                copy = size;
                if (copy >= record_room) {
                        copy = record_room;
                        full_record = true;
                }
-               required_size = rec->sg_plaintext_size + copy +
-                             tls_ctx->tx.overhead_size;
+
+               required_size = msg_pl->sg.size + copy +
+                               tls_ctx->tx.overhead_size;
 
                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
 alloc_payload:
-               ret = alloc_encrypted_sg(sk, required_size);
+               ret = tls_alloc_encrypted_msg(sk, required_size);
                if (ret) {
                        if (ret != -ENOSPC)
                                goto wait_for_memory;
@@ -992,26 +809,18 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
                         * actually allocated. The difference is due
                         * to max sg elements limit
                         */
-                       copy -= required_size - rec->sg_plaintext_size;
+                       copy -= required_size - msg_pl->sg.size;
                        full_record = true;
                }
 
-               get_page(page);
-               sg = &rec->sg_plaintext_data[1] + rec->sg_plaintext_num_elem;
-               sg_set_page(sg, page, copy, offset);
-               sg_unmark_end(sg);
-
-               rec->sg_plaintext_num_elem++;
-
+               sk_msg_page_add(msg_pl, page, copy, offset);
                sk_mem_charge(sk, copy);
+
                offset += copy;
                size -= copy;
-               rec->sg_plaintext_size += copy;
-               tls_ctx->pending_open_record_frags = rec->sg_plaintext_num_elem;
 
-               if (full_record || eor ||
-                   rec->sg_plaintext_num_elem ==
-                   ARRAY_SIZE(rec->sg_plaintext_data) - 1) {
+               tls_ctx->pending_open_record_frags = true;
+               if (full_record || eor || sk_msg_full(msg_pl)) {
                        rec->inplace_crypto = 0;
                        ret = tls_push_record(sk, flags, record_type);
                        if (ret) {
@@ -1027,7 +836,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
 wait_for_memory:
                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret) {
-                       trim_both_sgl(sk, rec->sg_plaintext_size);
+                       tls_trim_both_msgs(sk, msg_pl->sg.size);
                        goto sendpage_end;
                }
 
@@ -1092,6 +901,64 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
        return skb;
 }
 
+static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
+                              int length, int *pages_used,
+                              unsigned int *size_used,
+                              struct scatterlist *to,
+                              int to_max_pages)
+{
+       int rc = 0, i = 0, num_elem = *pages_used, maxpages;
+       struct page *pages[MAX_SKB_FRAGS];
+       unsigned int size = *size_used;
+       ssize_t copied, use;
+       size_t offset;
+
+       while (length > 0) {
+               i = 0;
+               maxpages = to_max_pages - num_elem;
+               if (maxpages == 0) {
+                       rc = -EFAULT;
+                       goto out;
+               }
+               copied = iov_iter_get_pages(from, pages,
+                                           length,
+                                           maxpages, &offset);
+               if (copied <= 0) {
+                       rc = -EFAULT;
+                       goto out;
+               }
+
+               iov_iter_advance(from, copied);
+
+               length -= copied;
+               size += copied;
+               while (copied) {
+                       use = min_t(int, copied, PAGE_SIZE - offset);
+
+                       sg_set_page(&to[num_elem],
+                                   pages[i], use, offset);
+                       sg_unmark_end(&to[num_elem]);
+                       /* We do not uncharge memory from this API */
+
+                       offset = 0;
+                       copied -= use;
+
+                       i++;
+                       num_elem++;
+               }
+       }
+       /* Mark the end in the last sg entry if newly added */
+       if (num_elem > *pages_used)
+               sg_mark_end(&to[num_elem - 1]);
+out:
+       if (rc)
+               iov_iter_revert(from, size - *size_used);
+       *size_used = size;
+       *pages_used = num_elem;
+
+       return rc;
+}
+
 /* This function decrypts the input skb into either out_iov or in out_sg
  * or in skb buffers itself. The input parameter 'zc' indicates if
  * zero-copy mode needs to be tried or not. With zero-copy mode, either
@@ -1189,9 +1056,9 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
                        sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);
 
                        *chunk = 0;
-                       err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
-                                                chunk, &sgout[1],
-                                                (n_sgout - 1), false);
+                       err = tls_setup_from_iter(sk, out_iov, data_len,
+                                                 &pages, chunk, &sgout[1],
+                                                 (n_sgout - 1));
                        if (err < 0)
                                goto fallback_to_reg_recv;
                } else if (out_sg) {
@@ -1619,25 +1486,15 @@ void tls_sw_free_resources_tx(struct sock *sk)
 
                rec = list_first_entry(&ctx->tx_list,
                                       struct tls_rec, list);
-
-               free_sg(sk, &rec->sg_plaintext_data[1],
-                       &rec->sg_plaintext_num_elem,
-                       &rec->sg_plaintext_size);
-
                list_del(&rec->list);
+               sk_msg_free(sk, &rec->msg_plaintext);
                kfree(rec);
        }
 
        list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
-               free_sg(sk, &rec->sg_encrypted_data[1],
-                       &rec->sg_encrypted_num_elem,
-                       &rec->sg_encrypted_size);
-
-               free_sg(sk, &rec->sg_plaintext_data[1],
-                       &rec->sg_plaintext_num_elem,
-                       &rec->sg_plaintext_size);
-
                list_del(&rec->list);
+               sk_msg_free(sk, &rec->msg_encrypted);
+               sk_msg_free(sk, &rec->msg_plaintext);
                kfree(rec);
        }