/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");
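/* Illustrative userspace sketch (not part of this file): the ULP is attached
 * to an established TCP socket before any keys are configured, roughly:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 * Only after that do the SOL_TLS socket options handled below apply.
 */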
enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);
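/* tls_prots is indexed as [IP version][TX config][RX config]; each slot is a
 * copy of the base TCP proto with the relevant callbacks overridden by
 * build_protos(). update_sk_prot() simply selects the slot that matches the
 * socket's current configuration.
 */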
static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}

	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
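/* Push an encrypted record, described by a scatterlist, into TCP via
 * do_tcp_sendpages(). If TCP accepts only part of it, the current scatterlist
 * entry and offset are stashed in the context so tls_push_partial_record()
 * can resume the transmission later.
 */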
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;
	ctx->sk_write_space(sk);

	return 0;
}
static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}
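/* Illustrative userspace sketch (not part of this file) of how a non-data
 * record type reaches tls_proccess_cmsg() below; the values are examples:
 *
 *	char alert[2] = { 1, 0 };	(warning, close_notify)
 *	struct iovec iov = { alert, sizeof(alert) };
 *	union { struct cmsghdr hdr; char buf[CMSG_SPACE(1)]; } u = { 0 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = u.buf,
 *			      .msg_controllen = sizeof(u.buf) };
 *
 *	u.hdr.cmsg_level = SOL_TLS;
 *	u.hdr.cmsg_type = TLS_SET_RECORD_TYPE;
 *	u.hdr.cmsg_len = CMSG_LEN(1);
 *	*CMSG_DATA(&u.hdr) = 21;	(21 == TLS "alert" content type)
 *	sendmsg(fd, &msg, 0);
 */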
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages call lower protocol write space handler
	 * to ensure we wake up any waiting operations there. For example
	 * if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);
}
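/* Zero the key material held in the context before freeing it so TLS secrets
 * do not linger in freed kernel memory.
 */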
static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
		goto skip_tx_cleanup;

	if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}

		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}

		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);

		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}
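/* Illustrative userspace sketch (not part of this file) of the TX key
 * installation handled by do_tls_setsockopt_conf(); the key, iv, salt and
 * rec_seq values come from the TLS handshake and are placeholders here:
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */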
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
	case TLS_CIPHER_AES_GCM_256: {
		optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
			sizeof(struct tls12_crypto_info_aes_gcm_128) :
			sizeof(struct tls12_crypto_info_aes_gcm_256);
		if (optlen != optsize) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}
static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}
static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}
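/* The per-address-family proto templates below are rebuilt only when the base
 * TCP proto pointer changes (typically on the first TLS socket of that
 * family); the smp_load_acquire()/smp_store_release() pairing lets later
 * sockets skip the mutex once the templates have been published.
 */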
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}
static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	kfree(ctx);
	icsk->icsk_ulp_data = NULL;
}
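/* TLS_HW_RECORD mode: a device registered through tls_register_device() can
 * claim the socket and own the record processing itself; in that case only
 * hash/unhash/close are interposed here and neither the SW nor the
 * CONFIG_TLS_DEVICE data paths are installed.
 */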
static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			spin_unlock_bh(&device_spinlock);
			tls_build_proto(sk);
			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			spin_lock_bh(&device_spinlock);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}
static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}
static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	return err;
}
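/* build_protos() fills one [tx_conf][rx_conf] matrix of proto templates for a
 * given base TCP proto: TLS_BASE keeps the plaintext callbacks, TLS_SW routes
 * through the kernel crypto data path, TLS_HW (CONFIG_TLS_DEVICE) hands record
 * processing to the NIC, and TLS_HW_RECORD only overrides hash/unhash/close
 * for devices on device_list.
 */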
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	tls_build_proto(sk);
	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}
void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);
void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name = "tls",
	.owner = THIS_MODULE,
	.init = tls_init,
};
static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}
static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);