/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static LIST_HEAD(rxe_dev_list);
static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
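
/* Look up the rxe device bound to a given network device. Called from
 * softirq context on the receive path, hence the _bh spinlock.
 */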
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (rxe->ndev == ndev) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}

struct rxe_dev *get_rxe_by_name(const char *name)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (!strcmp(name, rxe->ib_dev.name)) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}

struct rxe_recv_sockets recv_sockets;
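
/* Derive an EUI-64 identifier from the netdev MAC address per RFC 4291:
 * flip the universal/local bit in the first octet and insert 0xfffe
 * between the OUI and the NIC-specific bytes.
 */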
static __be64 rxe_mac_to_eui64(struct net_device *ndev)
{
	unsigned char *mac_addr = ndev->dev_addr;
	__be64 eui64;
	unsigned char *dst = (unsigned char *)&eui64;

	dst[0] = mac_addr[0] ^ 2;
	dst[1] = mac_addr[1];
	dst[2] = mac_addr[2];
	dst[3] = 0xff;
	dst[4] = 0xfe;
	dst[5] = mac_addr[3];
	dst[6] = mac_addr[4];
	dst[7] = mac_addr[5];

	return eui64;
}

static __be64 node_guid(struct rxe_dev *rxe)
{
	return rxe_mac_to_eui64(rxe->ndev);
}

static __be64 port_guid(struct rxe_dev *rxe)
{
	return rxe_mac_to_eui64(rxe->ndev);
}

static struct device *dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	/* DMA is done by the real device underneath a VLAN interface */
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}
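
/* Multicast GIDs are mapped to 33:33:xx:xx:xx:xx Ethernet addresses
 * (ipv6_eth_mc_map()) and programmed into the netdev multicast filter.
 */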
static int mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}

static int mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}
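
/* Resolve an output route for the UDP-encapsulated packet. The dst
 * reference returned here is later handed to the skb via skb_dst_set()
 * in the prepare_*_hdr() helpers.
 */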
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
						recv_sockets.sk6->sk, &ndst,
						&fl6))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		goto put;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif
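
/* encap_rcv callback installed on the tunnel socket: invoked by the UDP
 * stack for every datagram arriving on the RoCEv2 port. Strips the UDP
 * header, fills in the per-packet rxe_pkt_info and hands the skb to
 * rxe_rcv(). Returning 0 tells the stack the packet was consumed.
 */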
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct rxe_dev *rxe = net_to_rxe(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	return rxe_rcv(skb);
drop:
	kfree_skb(skb);
	return 0;
}
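
/* Create a kernel UDP socket bound to @port and register it as a tunnel
 * socket so that rxe_udp_encap_recv() sees incoming RoCEv2 traffic.
 */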
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = {0};
	struct udp_tunnel_sock_cfg tnl_cfg = {0};

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0) {
		pr_err("failed to create udp socket. err = %d\n", err);
		return ERR_PTR(err);
	}

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

void rxe_release_udp_tunnel(struct socket *sk)
{
	/* sk6 may be NULL when IPv6 init failed or is disabled */
	if (sk)
		udp_tunnel_sock_release(sk);
}
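
/* Transmit-side header construction: the RoCE payload is already in the
 * skb, so headers are pushed in front of it innermost first (UDP here,
 * then IPv4/IPv6 in the prepare_*_hdr() helpers below).
 */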
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	= IPVERSION;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= proto;
	iph->tos	= tos;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->ttl	= ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst);

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
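
/* RoCEv2 encapsulation: the transport headers and payload ride in a UDP
 * datagram addressed to the IANA-assigned port 4791 (ROCE_V2_UDP_DPORT).
 * A destination address equal to the source address marks the packet
 * for the loopback path instead of the wire.
 */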
static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route4(rxe->ndev, saddr, daddr);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	return 0;
}

static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct dst_entry *dst;
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route6(rxe->ndev, saddr, daddr);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	return 0;
}

static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		   struct sk_buff *skb, u32 *crc)
{
	int err = 0;
	struct rxe_av *av = rxe_get_av(pkt);

	if (av->network_type == RDMA_NETWORK_IPV4)
		err = prepare4(rxe, pkt, skb, av);
	else if (av->network_type == RDMA_NETWORK_IPV6)
		err = prepare6(rxe, pkt, skb, av);

	*crc = rxe_icrc_hdr(pkt, skb);

	return err;
}
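
/* skb destructor used for transmit flow control: when the number of
 * packets a QP has in flight drops back below the low watermark, kick
 * the requester task so it can post more.
 */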
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);
}

static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct rxe_av *av;
	int err;

	av = rxe_get_av(pkt);

	/* transmit a clone; the original is freed below once the clone
	 * has been queued
	 */
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	nskb->destructor = rxe_skb_tx_dtor;
	nskb->sk = pkt->qp->sk->sk;

	if (av->network_type == RDMA_NETWORK_IPV4) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
	} else if (av->network_type == RDMA_NETWORK_IPV6) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk,
				    nskb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
		kfree_skb(nskb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	atomic_inc(&pkt->qp->skb_out);
	kfree_skb(skb);

	return 0;
}

static int loopback(struct sk_buff *skb)
{
	return rxe_rcv(skb);
}

static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
{
	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}
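
/* Allocate and lay out an skb for one outgoing packet: reserve room in
 * front for the worst-case link, IP and UDP headers, and put paylen
 * bytes for the RoCE headers plus payload.
 */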
static struct sk_buff *init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				   int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb;

	if (av->network_type == RDMA_NETWORK_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
			GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));

	skb->dev = rxe->ndev;
	if (av->network_type == RDMA_NETWORK_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

	memset(pkt->hdr, 0, paylen);

	return skb;
}

/*
 * This is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying Ethernet devices.
 */
static char *parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

static enum rdma_link_layer link_layer(struct rxe_dev *rxe,
				       unsigned int port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
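
/* Interface operations the rxe core uses to reach the network layer. */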
static struct rxe_ifc_ops ifc_ops = {
	.node_guid	= node_guid,
	.port_guid	= port_guid,
	.dma_device	= dma_device,
	.mcast_add	= mcast_add,
	.mcast_delete	= mcast_delete,
	.prepare	= prepare,
	.send		= send,
	.loopback	= loopback,
	.init_packet	= init_packet,
	.parent_name	= parent_name,
	.link_layer	= link_layer,
};

struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
	if (!rxe)
		return NULL;

	rxe->ifc_ops = &ifc_ops;
	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return NULL;
	}

	spin_lock_bh(&dev_list_lock);
	list_add_tail(&rxe->list, &rxe_dev_list);
	spin_unlock_bh(&dev_list_lock);

	return rxe;
}
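
/* Tear down every rxe device. The list lock is dropped around
 * rxe_remove() because it can sleep; the loop then retakes the lock and
 * revalidates the list head before picking the next entry.
 */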
void rxe_remove_all(void)
{
	spin_lock_bh(&dev_list_lock);
	while (!list_empty(&rxe_dev_list)) {
		struct rxe_dev *rxe =
			list_first_entry(&rxe_dev_list, struct rxe_dev, list);

		list_del(&rxe->list);
		spin_unlock_bh(&dev_list_lock);
		rxe_remove(rxe);
		spin_lock_bh(&dev_list_lock);
	}
	spin_unlock_bh(&dev_list_lock);
}
EXPORT_SYMBOL(rxe_remove_all);

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;
	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	pr_info("set %s active\n", rxe->ib_dev.name);
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;
	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	pr_info("set %s down\n", rxe->ib_dev.name);
}
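
/* Netdevice notifier: mirror state changes of the underlying Ethernet
 * device (unregister, link up/down, MTU change) onto the rxe device.
 */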
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = net_to_rxe(ndev);

	if (!rxe)
		goto out;

	switch (event) {
	case NETDEV_UNREGISTER:
		list_del(&rxe->list);
		rxe_remove(rxe);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}
out:
	return NOTIFY_OK;
}

struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};

int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT),
						false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}

int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT),
						true);
	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}

int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}

	return 0;

err_out:
	rxe_net_exit();
	return err;
}