/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};
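/* Usage note (illustrative, not part of the module): the MODULE_ALIAS above
 * lets this ULP autoload when userspace attaches it to an established TCP
 * socket. A minimal sketch of the userspace side, assuming <netinet/tcp.h>
 * and <linux/tls.h>:
 *
 *	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
 *		perror("TCP_ULP");
 *
 * Key material is then installed via setsockopt(SOL_TLS, TLS_TX/TLS_RX);
 * see do_tls_setsockopt_conf() below.
 */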
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}
			/* Partial send: remember where to resume */
			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}
		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;
		offset = sg->offset;
		size = sg->length;
	}
	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}
static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			/* a non-data record cannot be held open with MSG_MORE */
			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
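/* Usage note (illustrative): userspace selects a non-data record type via a
 * control message, e.g. to send a TLS alert (record type 21). A sketch along
 * the lines of Documentation/networking/tls.txt:
 *
 *	char buf[CMSG_SPACE(sizeof(unsigned char))] = { 0 };
 *	struct msghdr msg = { 0 };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = buf;
 *	msg.msg_controllen = sizeof(buf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = 21;			// TLS alert
 *	msg.msg_controllen = cmsg->cmsg_len;
 *	// fill msg.msg_iov with the alert body, then sendmsg(fd, &msg, 0)
 */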
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}
int tls_push_pending_closed_record(struct sock *sk,
				   struct tls_context *tls_ctx,
				   int flags, long *timeo)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (tls_is_partially_sent_record(tls_ctx) ||
	    !list_empty(&ctx->tx_list))
		return tls_tx_records(sk, flags);

	return tls_ctx->push_pending_record(sk, flags);
}
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* If in_tcp_sendpages is set, call the lower protocol's write space
	 * handler to ensure we wake up any operations waiting there, e.g.
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

	/* Schedule the transmission if the tx list is ready */
	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
	}

	ctx->sk_write_space(sk);
}
static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	/* wipe key material before freeing */
	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if ((ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD) ||
	    (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE)) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}
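/* Usage note (illustrative): userspace can read back the current TX crypto
 * state, e.g. before handing the connection off to another process. A sketch:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci;
 *	socklen_t len = sizeof(ci);
 *
 *	if (getsockopt(fd, SOL_TLS, TLS_TX, &ci, &len) == 0) {
 *		// ci.iv and ci.rec_seq now reflect the live TX state
 *	}
 */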
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx)
		crypto_info = &ctx->crypto_send.info;
	else
		crypto_info = &ctx->crypto_recv.info;

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* prefer device offload, fall back to software */
	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}
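/* Usage note (illustrative): after attaching the "tls" ULP, userspace
 * installs key material from its handshake like so (values hypothetical):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	// fill ci.key, ci.salt, ci.iv, ci.rec_seq from the handshake
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));	// RX likewise
 */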
static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}
static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}
static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}
static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			/* hold the device across the callback and drop the
			 * spinlock, since dev->unhash() may sleep
			 */
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}
static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}
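/* Note (editorial): the matrix is indexed [tx_conf][rx_conf], so e.g. a
 * socket with software TX and unconfigured RX resolves to
 * prot[TLS_SW][TLS_BASE], which overrides only sendmsg/sendpage and
 * inherits everything else from the base TCP proto copied above.
 */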
static int tls_init(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	/* Rebuild IPv6 TLS protos whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}
void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);
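/* Usage note (illustrative, hypothetical driver): a NIC driver capable of
 * full TLS record offload registers itself with this module like so:
 *
 *	static struct tls_device my_tls_dev = {
 *		.feature = my_feature,	// returns nonzero if offload is usable
 *		.hash    = my_hash,
 *		.unhash  = my_unhash,
 *		.release = my_release,	// kref release callback
 *	};
 *
 *	tls_register_device(&my_tls_dev);	// e.g. at probe time
 *	tls_unregister_device(&my_tls_dev);	// e.g. at remove time
 */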
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
};
static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}
static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}
module_init(tls_register);
module_exit(tls_unregister);