1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
4 #include <linux/bitfield.h>
5 #include <linux/ipv6.h>
6 #include <linux/skbuff.h>
10 #include "../nfp_net.h"
/* Every CCM mailbox command the driver issues for kTLS; firmware must
 * advertise all of them (checked against tlv_caps.mbox_cmsg_types in
 * nfp_net_tls_init()) before offload is enabled.
 */
14 #define NFP_NET_TLS_CCM_MBOX_OPS_MASK \
15 (BIT(NFP_CCM_TYPE_CRYPTO_RESET) | \
16 BIT(NFP_CCM_TYPE_CRYPTO_ADD) | \
17 BIT(NFP_CCM_TYPE_CRYPTO_DEL) | \
18 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))
/* Crypto-capability bit for RX (decrypt) TLS 1.2 AES-GCM-128 */
20 #define NFP_NET_TLS_OPCODE_MASK_RX \
21 BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)
/* Crypto-capability bit for TX (encrypt) TLS 1.2 AES-GCM-128 */
23 #define NFP_NET_TLS_OPCODE_MASK_TX \
24 BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)
/* All TLS opcodes this driver can drive, either direction */
26 #define NFP_NET_TLS_OPCODE_MASK \
27 (NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
/* Set or clear the enable bit for one crypto opcode in the device's
 * crypto-enable words (read-modify-write of the 32-bit control-BAR word
 * that holds @opcode's bit).
 * NOTE(review): this listing elides lines 30-32, 34, 36 and 38 -- the
 * "|=" and "&= ~" statements below are presumably the two arms of an
 * if (on)/else; confirm against the full source.
 */
29 static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
/* Locate the 4-byte-aligned enable word containing @opcode's bit */
33 off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);
35 val = nn_readl(nn, off);
37 val |= BIT(opcode & 31);
39 val &= ~BIT(opcode & 31);
40 nn_writel(nn, off, val);
/* Adjust the per-direction kTLS connection counter by @add (+1/-1) and
 * update the matching crypto-enable bit in the BAR.  Caller must hold the
 * ctrl BAR lock (see nfp_net_tls_conn_cnt_changed()).
 * NOTE(review): return type/value elided from this listing; the caller
 * treats a non-zero return as "device reconfig needed", presumably on
 * 0 -> 1 / 1 -> 0 transitions only -- confirm in full source.
 */
44 __nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
45 enum tls_offload_ctx_dir direction)
50 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
51 opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
52 nn->ktls_tx_conn_cnt += add;
53 cnt = nn->ktls_tx_conn_cnt;
/* Mirror "any TX connections" into the datapath flag */
54 nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
56 opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
57 nn->ktls_rx_conn_cnt += add;
58 cnt = nn->ktls_rx_conn_cnt;
61 /* Care only about 0 -> 1 and 1 -> 0 transitions */
/* Enable the opcode while any connection exists, disable at zero */
65 nfp_net_crypto_set_op(nn, opcode, cnt);
/* Locked wrapper around __nfp_net_tls_conn_cnt_changed(): takes the ctrl
 * BAR lock, and when the counter change flipped an enable bit, pushes the
 * new crypto config to the device.  On reconfig failure the counter
 * adjustment is rolled back so software and hardware state stay in sync.
 */
70 nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
71 enum tls_offload_ctx_dir direction)
75 /* Use the BAR lock to protect the connection counts */
77 if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
78 ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
79 /* Undo the cnt adjustment if failed */
81 __nfp_net_tls_conn_cnt_changed(nn, -add, direction);
83 nn_ctrl_bar_unlock(nn);
/* Account one new kTLS connection in @direction; may reconfigure the NIC */
89 nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
91 return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
/* Drop one kTLS connection in @direction; may reconfigure the NIC */
95 nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
97 return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
/* Allocate a CCM mailbox skb for a request of @req_sz bytes whose reply
 * is the common "simple" reply (struct nfp_crypto_reply_simple).
 */
100 static struct sk_buff *
101 nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
103 return nfp_ccm_mbox_msg_alloc(nn, req_sz,
104 sizeof(struct nfp_crypto_reply_simple),
/* Send @skb over the CCM mailbox and decode the "simple" reply: warn with
 * @name on transport failure or on a non-zero firmware error code, then
 * consume the skb.
 * NOTE(review): return statements elided from this listing; presumably
 * returns 0 on success or a negative errno -- confirm in full source.
 */
109 nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
110 const char *name, enum nfp_ccm_type type)
112 struct nfp_crypto_reply_simple *reply;
/* CRYPTO_DEL must not fail mid-way, hence the "critical" flag */
115 err = __nfp_ccm_mbox_communicate(nn, skb, type,
116 sizeof(*reply), sizeof(*reply),
117 type == NFP_CCM_TYPE_CRYPTO_DEL);
119 nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
123 reply = (void *)skb->data;
/* Firmware reports errors as positive big-endian values; negate to errno */
124 err = -be32_to_cpu(reply->error);
126 nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
128 dev_consume_skb_any(skb);
/* Best-effort delete of a firmware TLS session identified by @fw_handle;
 * errors are only logged by the communicate helper.
 */
133 static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
135 struct nfp_crypto_req_del *req;
138 skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
142 req = (void *)skb->data;
144 memcpy(req->handle, fw_handle, sizeof(req->handle));
146 nfp_net_tls_communicate_simple(nn, skb, "delete",
147 NFP_CCM_TYPE_CRYPTO_DEL);
/* Fill the IPv4 part of a CRYPTO_ADD request: bumps key_len by the two
 * addresses, marks the request as IPv4/no-VLAN, and writes src/dst from
 * the socket.  For RX the addresses are swapped so firmware matches the
 * flow as seen on the wire.
 * NOTE(review): return statement elided from this listing; presumably
 * returns the trailing "back" section of the v4 request -- confirm.
 */
150 static struct nfp_crypto_req_add_back *
151 nfp_net_tls_set_ipv4(struct nfp_crypto_req_add_v4 *req, struct sock *sk,
154 struct inet_sock *inet = inet_sk(sk);
156 req->front.key_len += sizeof(__be32) * 2;
157 req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 4) |
158 FIELD_PREP(NFP_NET_TLS_VLAN,
159 NFP_NET_TLS_VLAN_UNUSED));
161 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
162 req->src_ip = inet->inet_saddr;
163 req->dst_ip = inet->inet_daddr;
/* RX: reversed so the tuple matches ingress packets */
165 req->src_ip = inet->inet_daddr;
166 req->dst_ip = inet->inet_saddr;
/* IPv6 analogue of nfp_net_tls_set_ipv4(): adds both in6_addr lengths to
 * key_len, marks the request IPv6/no-VLAN, and copies the addresses
 * (swapped for RX to match ingress packets).  Body only built with
 * CONFIG_IPV6.
 * NOTE(review): return statement elided from this listing; presumably
 * returns the trailing "back" section of the v6 request -- confirm.
 */
172 static struct nfp_crypto_req_add_back *
173 nfp_net_tls_set_ipv6(struct nfp_crypto_req_add_v6 *req, struct sock *sk,
176 #if IS_ENABLED(CONFIG_IPV6)
177 struct ipv6_pinfo *np = inet6_sk(sk);
179 req->front.key_len += sizeof(struct in6_addr) * 2;
180 req->front.ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, 6) |
181 FIELD_PREP(NFP_NET_TLS_VLAN,
182 NFP_NET_TLS_VLAN_UNUSED));
184 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
185 memcpy(req->src_ip, &np->saddr, sizeof(req->src_ip));
186 memcpy(req->dst_ip, &sk->sk_v6_daddr, sizeof(req->dst_ip));
/* RX: reversed so the tuple matches ingress packets */
188 memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
189 memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
/* Fill the L4 part of a CRYPTO_ADD request: protocol is always TCP (kTLS
 * runs over TCP only) and the ports are taken from the socket, swapped
 * for RX so the tuple matches ingress packets.
 */
197 nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
198 struct nfp_crypto_req_add_back *back, struct sock *sk,
201 struct inet_sock *inet = inet_sk(sk);
203 front->l4_proto = IPPROTO_TCP;
205 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
206 back->src_port = inet->inet_sport;
207 back->dst_port = inet->inet_dport;
209 back->src_port = inet->inet_dport;
210 back->dst_port = inet->inet_sport;
/* Map a TLS offload direction to the firmware TLS 1.2 AES-GCM-128 opcode
 * (TX -> encrypt, RX -> decrypt).
 * NOTE(review): the default/fallthrough case is elided from this listing.
 */
214 static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
217 case TLS_OFFLOAD_CTX_DIR_TX:
218 return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
219 case TLS_OFFLOAD_CTX_DIR_RX:
220 return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
/* Check whether firmware advertises support for @cipher_type in
 * @direction; only AES-GCM-128 is recognized, looked up in the
 * crypto_ops capability bitmap.
 */
228 nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
229 enum tls_offload_ctx_dir direction)
233 switch (cipher_type) {
234 case TLS_CIPHER_AES_GCM_128:
235 if (direction == TLS_OFFLOAD_CTX_DIR_TX)
236 bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
238 bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
244 return nn->tlv_caps.crypto_ops & BIT(bit);
/* .tls_dev_add callback: install a TLS session in firmware.  Builds a
 * CRYPTO_ADD request (IP tuple, ports, key material, starting TCP seq),
 * sends it over the CCM mailbox, and stores the returned firmware handle
 * in the socket's driver context.
 * NOTE(review): several error-path labels and returns are elided from
 * this listing; the visible cleanup order is del-fw-handle, free skb,
 * undo connection count.
 */
248 nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
249 enum tls_offload_ctx_dir direction,
250 struct tls_crypto_info *crypto_info,
251 u32 start_offload_tcp_sn)
253 struct tls12_crypto_info_aes_gcm_128 *tls_ci;
254 struct nfp_net *nn = netdev_priv(netdev);
255 struct nfp_crypto_req_add_front *front;
256 struct nfp_net_tls_offload_ctx *ntls;
257 struct nfp_crypto_req_add_back *back;
258 struct nfp_crypto_reply_add *reply;
/* Driver context must fit in the core's per-direction driver state */
264 BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
265 TLS_DRIVER_STATE_SIZE_TX);
266 BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
267 TLS_DRIVER_STATE_SIZE_RX);
269 if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
/* Pick request size by address family; v4-mapped v6 sockets use the
 * IPv4 request layout
 */
272 switch (sk->sk_family) {
273 #if IS_ENABLED(CONFIG_IPV6)
275 if (sk->sk_ipv6only ||
276 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
277 req_sz = sizeof(struct nfp_crypto_req_add_v6);
284 req_sz = sizeof(struct nfp_crypto_req_add_v4);
/* Count the connection (and enable the opcode) before talking to fw */
291 err = nfp_net_tls_conn_add(nn, direction);
295 skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
298 goto err_conn_remove;
301 front = (void *)skb->data;
304 front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
305 memset(front->resv, 0, sizeof(front->resv));
308 back = nfp_net_tls_set_ipv6((void *)skb->data, sk, direction);
310 back = nfp_net_tls_set_ipv4((void *)skb->data, sk, direction);
312 nfp_net_tls_set_l4(front, back, sk, direction);
315 back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);
317 tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
318 memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
/* Zero the unused tail of the fixed-size key field */
319 memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
320 sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
321 memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
322 memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
323 memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));
325 err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
326 sizeof(*reply), sizeof(*reply));
328 nn_dp_warn(&nn->dp, "failed to add TLS: %d\n", err);
329 /* communicate frees skb on error */
330 goto err_conn_remove;
333 reply = (void *)skb->data;
334 err = -be32_to_cpu(reply->error);
/* Log "table full" only on the first occurrence since last success */
336 if (err == -ENOSPC) {
337 if (!atomic_fetch_inc(&nn->ktls_no_space))
338 nn_info(nn, "HW TLS table full\n");
341 "failed to add TLS, FW replied: %d\n", err);
/* A zero handle is invalid -- treat fw success with it as an error */
346 if (!reply->handle[0] && !reply->handle[1]) {
347 nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
352 ntls = tls_driver_ctx(sk, direction);
353 memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
354 if (direction == TLS_OFFLOAD_CTX_DIR_TX)
355 ntls->next_seq = start_offload_tcp_sn;
356 dev_consume_skb_any(skb);
358 if (direction == TLS_OFFLOAD_CTX_DIR_TX)
/* RX: let the TLS core drive resync via next-hint requests */
361 tls_offload_rx_resync_set_type(sk,
362 TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
/* Error path: tear down the fw session we may have created */
366 nfp_net_tls_del_fw(nn, reply->handle);
368 dev_consume_skb_any(skb);
370 nfp_net_tls_conn_remove(nn, direction);
/* .tls_dev_del callback: drop the connection count (may disable the
 * opcode) and delete the firmware session stored in the driver context.
 */
375 nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
376 enum tls_offload_ctx_dir direction)
378 struct nfp_net *nn = netdev_priv(netdev);
379 struct nfp_net_tls_offload_ctx *ntls;
381 nfp_net_tls_conn_remove(nn, direction);
383 ntls = __tls_driver_ctx(tls_ctx, direction);
384 nfp_net_tls_del_fw(nn, ntls->fw_handle);
/* .tls_dev_resync callback: push a new TCP sequence / record number pair
 * for the session to firmware.  TX resync may sleep, so it waits for the
 * reply and updates next_seq; RX resync runs in atomic context and is
 * posted asynchronously without waiting.
 */
388 nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
389 u8 *rcd_sn, enum tls_offload_ctx_dir direction)
391 struct nfp_net *nn = netdev_priv(netdev);
392 struct nfp_net_tls_offload_ctx *ntls;
393 struct nfp_crypto_req_update *req;
/* RX resync is called from softirq context -- cannot sleep */
397 flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
398 skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
402 ntls = tls_driver_ctx(sk, direction);
403 req = (void *)skb->data;
405 req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
406 memset(req->resv, 0, sizeof(req->resv));
407 memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
408 req->tcp_seq = cpu_to_be32(seq);
409 memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));
411 if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
412 nfp_net_tls_communicate_simple(nn, skb, "sync",
413 NFP_CCM_TYPE_CRYPTO_UPDATE);
414 ntls->next_seq = seq;
/* RX: fire-and-forget post, reply handled by the mailbox layer */
416 nfp_ccm_mbox_post(nn, skb, NFP_CCM_TYPE_CRYPTO_UPDATE,
417 sizeof(struct nfp_crypto_reply_simple));
/* kTLS offload callbacks registered via netdev->tlsdev_ops */
421 static const struct tlsdev_ops nfp_net_tls_ops = {
422 .tls_dev_add = nfp_net_tls_add,
423 .tls_dev_del = nfp_net_tls_del,
424 .tls_dev_resync = nfp_net_tls_resync,
/* Ask firmware to drop all existing TLS sessions (used at init to start
 * from a clean state).
 */
427 static int nfp_net_tls_reset(struct nfp_net *nn)
429 struct nfp_crypto_req_reset *req;
432 skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
436 req = (void *)skb->data;
439 return nfp_net_tls_communicate_simple(nn, skb, "reset",
440 NFP_CCM_TYPE_CRYPTO_RESET);
/* Probe-time kTLS setup: verify firmware capabilities (crypto opcodes,
 * mailbox command set, mailbox size), reset firmware TLS state, clear the
 * crypto-enable register, then advertise the supported NETIF_F_HW_TLS_*
 * features and install the tlsdev_ops.
 * NOTE(review): early-return statements between the checks are elided
 * from this listing.
 */
443 int nfp_net_tls_init(struct nfp_net *nn)
445 struct net_device *netdev = nn->dp.netdev;
/* No TLS opcodes advertised -- nothing to offload */
448 if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
/* All four mailbox crypto commands must be supported */
451 if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
452 NFP_NET_TLS_CCM_MBOX_OPS_MASK)
/* The largest request (IPv6 add) must fit in the mailbox */
455 if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
456 nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
457 nn->tlv_caps.mbox_len);
461 err = nfp_net_tls_reset(nn);
/* Start with all crypto opcodes disabled until connections appear */
465 nn_ctrl_bar_lock(nn);
466 nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
467 err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
468 nn_ctrl_bar_unlock(nn);
472 if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
473 netdev->hw_features |= NETIF_F_HW_TLS_RX;
474 netdev->features |= NETIF_F_HW_TLS_RX;
476 if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
477 netdev->hw_features |= NETIF_F_HW_TLS_TX;
478 netdev->features |= NETIF_F_HW_TLS_TX;
481 netdev->tlsdev_ops = &nfp_net_tls_ops;