/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>
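
/* Chain the unconsumed tail of the walk's current scatterlist entry onto
 * @sg, so a crypto operation can pick up exactly where the walk left off.
 * The walk may sit part-way into its current entry; @diff accounts for the
 * bytes already consumed.
 */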
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}
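
/* Encrypt one TLS record starting at the current position of @in, writing
 * ciphertext through @out and advancing both walks past the record.
 *
 * Wire layout of a TLS 1.2 AES-GCM-128 record, as assumed here:
 *
 *   | 5-byte header | 8-byte explicit IV | payload | 16-byte auth tag |
 *
 * The record length is read from bytes 3-4 of the header (big endian);
 * the header and explicit IV are passed through to the output unencrypted.
 */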
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		     (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
		     TLS_1_2_VERSION);

	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* The input buffer doesn't contain the entire record.
		 * Trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb.
		 * Note that we assume the output buffer length
		 * is larger than input buffer length + tag size.
		 */
		if (*in_len < 0)
			len += *in_len;
		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}
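
/* Helpers for the software AEAD request: the request is allocated with
 * room for the transform's private context and always uses the fixed
 * TLS AAD size.
 */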
static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}

static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}
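
/* Walk @sg_in record by record, re-encrypting @len bytes into @sg_out and
 * bumping the record sequence number once per record. Stops early if
 * tls_enc_record() fails.
 */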
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len);
		rcd_sn++;
	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}
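
/* Finish the fallback skb: copy the original headers into @nskb, fix up
 * the TCP checksum, and move socket ownership (destructor and
 * sk_wmem_alloc accounting) from @skb to @nskb.
 */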
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);
	update_chksum(nskb, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed so make sure we don't use anything freed during
 * tls_sk_proto_close here
 */
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		WARN(1, "Record not found for seq %u\n", tcp_seq);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), frag->page_offset);

		remaining -= skb_frag_size(frag);

		/* The last frag may extend past what we need; trim it. */
		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}
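
/* Lay out the destination scatterlist: scratch space for the re-encrypted
 * bytes that precede this packet's payload (sync_size bytes), the payload
 * area of the new skb itself, and scratch room for an authentication tag
 * that may be generated past the end of the packet's own data.
 */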
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len,
			int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}
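
/* Build the encrypted fallback skb: allocate the AEAD request and a scratch
 * buffer holding salt/IV, AAD and the out-of-packet bytes, re-encrypt the
 * records in software, and return the finished clone (NULL on failure).
 */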
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}
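
/* Software fallback path, typically hit when a packet must be retransmitted
 * through a device that does not hold this connection's TLS offload state.
 * The ciphertext is rebuilt in software from the TX record list and the
 * original skb is consumed.
 */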
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	kfree_skb(skb);
	return nskb;
}
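
/* Packets leaving through the device that owns the offload state pass
 * through unchanged; anything else is re-encrypted in software.
 */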
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == tls_get_ctx(sk)->netdev)
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
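
/* Allocate and key the software "gcm(aes)" AEAD used by the fallback path,
 * mirroring the key material programmed into the device.
 */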
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
		crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}