/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"
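/* Module-wide UDP tunnel sockets that receive all RoCEv2 traffic
 * (UDP destination port ROCE_V2_UDP_DPORT, 4791) on behalf of every
 * rxe device; packets are demultiplexed to a device in encap_rcv.
 */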
static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}
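/* Multicast is implemented by mapping the multicast GID to an
 * ethernet multicast address (the RFC 2464 33:33:xx:xx:xx:xx mapping
 * done by ipv6_eth_mc_map()) and adding or removing that address on
 * the underlying netdev's multicast filter list.
 */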
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}
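/* Resolve an IPv4 route for the source/destination pair. The returned
 * dst_entry holds a reference that the caller must release.
 */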
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	memset(&fl, 0, sizeof(fl));
	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}
#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
					       recv_sockets.sk6->sk, &fl6,
					       NULL);
	if (unlikely(IS_ERR(ndst))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		return NULL;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif
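/* Look up the route for an outgoing packet. For RC QPs the dst is
 * cached on the QP's socket and only re-resolved when dst_check()
 * reports it stale; other QP types resolve a route for every packet.
 */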
static struct dst_entry *rxe_find_route(struct net_device *ndev,
					struct rxe_qp *qp,
					struct rxe_av *av)
{
	struct dst_entry *dst = NULL;

	if (qp_type(qp) == IB_QPT_RC)
		dst = sk_dst_get(qp->sk->sk);

	if (!dst || !dst_check(dst, qp->dst_cookie)) {
		if (dst)
			dst_release(dst);

		if (av->network_type == RDMA_NETWORK_IPV4) {
			struct in_addr *saddr;
			struct in_addr *daddr;

			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
			dst = rxe_find_route4(ndev, saddr, daddr);
		} else if (av->network_type == RDMA_NETWORK_IPV6) {
			struct in6_addr *saddr6;
			struct in6_addr *daddr6;

			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
			dst = rxe_find_route6(ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
			if (dst)
				qp->dst_cookie =
					rt6_get_cookie((struct rt6_info *)dst);
#endif
		}

		if (dst && (qp_type(qp) == IB_QPT_RC)) {
			dst_hold(dst);
			sk_dst_set(qp->sk->sk, dst);
		}
	}

	return dst;
}
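/* encap_rcv callback for the UDP tunnel sockets: runs for every UDP
 * packet arriving on the RoCEv2 port and feeds it to the rxe receive
 * path after locating the rxe device for the (possibly VLAN) netdev.
 */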
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct net_device *rdev = ndev;
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe && is_vlan_dev(rdev)) {
		rdev = vlan_dev_real_dev(ndev);
		rxe = rxe_get_dev_from_net(rdev);
	}

	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	rxe_rcv(skb);

	/*
	 * FIXME: this is in the wrong place, it needs to be done when pkt is
	 * destroyed
	 */
	ib_device_put(&rxe->ib_dev);

	return 0;

drop:
	kfree_skb(skb);

	return 0;
}
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = { };
	struct udp_tunnel_sock_cfg tnl_cfg = { };

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0) {
		pr_err("failed to create udp socket. err = %d\n", err);
		return ERR_PTR(err);
	}

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}
static void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}
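/* Outgoing packets are built back to front: the RoCE transport header
 * and payload already sit in the skb, and the helpers below push the
 * UDP and then IP headers in front of them. The UDP checksum is left
 * at zero; end-to-end integrity is covered by the RoCE ICRC instead.
 */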
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	= IPVERSION;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= proto;
	iph->tos	= tos;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->ttl	= ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}
static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst_clone(dst));

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->nexthdr	  = proto;
	ip6h->hop_limit	  = ttl;
	ip6h->daddr	  = *daddr;
	ip6h->saddr	  = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct rxe_av *av = rxe_get_av(pkt);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	dst_release(dst);
	return 0;
}
static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	struct rxe_av *av = rxe_get_av(pkt);
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route(skb->dev, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
			cpu_to_be16(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	dst_release(dst);
	return 0;
}
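/* Attach the network headers to an outgoing skb and compute the ICRC
 * over the header fields; the caller is expected to extend the crc
 * over the payload. Packets addressed to our own MAC are flagged so
 * they take the loopback path instead of the wire.
 */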
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
{
	int err = 0;

	if (skb->protocol == htons(ETH_P_IP))
		err = prepare4(pkt, skb);
	else if (skb->protocol == htons(ETH_P_IPV6))
		err = prepare6(pkt, skb);

	*crc = rxe_icrc_hdr(pkt, skb);

	if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
		pkt->mask |= RXE_LOOPBACK_MASK;

	return err;
}
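/* skb destructor: credits back one in-flight send for the QP. If the
 * requester stalled because too many skbs were outstanding, kick its
 * task once the count drops below the low watermark.
 */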
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);

	rxe_drop_ref(qp);
}
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;

	skb->destructor = rxe_skb_tx_dtor;
	skb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP)) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	return 0;
}
/* loopback packets are handed straight back to the receive path */
void rxe_loopback(struct sk_buff *skb)
{
	rxe_rcv(skb);
}
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb = NULL;
	struct net_device *ndev;
	const struct ib_gid_attr *attr;
	const int port_num = 1;

	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
	if (IS_ERR(attr))
		return NULL;

	if (av->network_type == RDMA_NETWORK_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (IS_ERR(ndev)) {
		rcu_read_unlock();
		goto out;
	}

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
			GFP_ATOMIC);
	if (unlikely(!skb)) {
		rcu_read_unlock();
		goto out;
	}

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));

	/* FIXME: hold reference to this netdev until life of this skb. */
	skb->dev = ndev;
	rcu_read_unlock();

	if (av->network_type == RDMA_NETWORK_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = port_num;
	pkt->hdr = skb_put_zero(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

out:
	rdma_put_gid_attr(attr);
	return skb;
}
/* this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
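/* Create a new rxe device bound to the given ethernet device; in
 * recent kernels this is reached from the rdma link netlink path
 * (e.g. "rdma link add ... type rxe netdev ...").
 */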
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = ib_alloc_device(rxe_dev, ib_dev);
	if (!rxe)
		return -ENOMEM;

	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu, ibdev_name);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return err;
	}

	return 0;
}
static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	dev_info(&rxe->ib_dev.dev, "set active\n");
}
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
	dev_info(&rxe->ib_dev.dev, "set down\n");
}
void rxe_set_port_state(struct rxe_dev *rxe)
{
	if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
		rxe_port_up(rxe);
	else
		rxe_port_down(rxe);
}
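/* netdevice notifier: tracks state changes of the underlying ethernet
 * device and mirrors them into IB port state, MTU and device lifetime.
 */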
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);

	if (!rxe)
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rxe->ib_dev);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_CHANGE:
		rxe_set_port_state(rxe);
		break;
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}

	ib_device_put(&rxe->ib_dev);
	return NOTIFY_OK;
}
static struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};
static int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
				htons(ROCE_V2_UDP_DPORT), false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}
static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)

	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
						htons(ROCE_V2_UDP_DPORT), true);
	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}
void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}
int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}

	return 0;

err_out:
	rxe_net_exit();
	return err;
}