/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid       gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ         3
struct netdev_event_work_cmd {
        roce_netdev_callback    cb;
        roce_netdev_filter      filter;
        struct net_device       *ndev;
        struct net_device       *filter_ndev;
};

struct netdev_event_work {
        struct work_struct              work;
        struct netdev_event_work_cmd    cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE   ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

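/*
 * Return a bitmask of the GID types supported on @port of @ib_dev.
 * Non-RoCE ports only support IB_GID_TYPE_IB; RoCE ports get one bit
 * per supported encapsulation (RoCE v1 and/or RoCE v2 UDP).
 */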
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

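/*
 * Add or delete @gid on @port for every GID type the port supports,
 * reusing @gid_attr with its gid_type updated on each iteration.
 */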
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE      = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE    = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA          = 1UL << 2,
};

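/*
 * Classify @dev relative to @upper: ACTIVE if @upper is a bond master
 * whose active slave is @dev, INACTIVE if another slave is active, and
 * NA when @upper is not a bond master or no active slave is set.
 * Must be called under rcu_read_lock().
 */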
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES            (BONDING_SLAVE_STATE_ACTIVE |   \
                                         BONDING_SLAVE_STATE_NA)
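/*
 * Filter: the event netdev (@cookie) is served by @rdma_ndev, i.e. the
 * event device's real (non-VLAN) device is @rdma_ndev itself, or
 * @rdma_ndev is a lower device of the event netdev and is either the
 * bond's active slave or not enslaved at all.
 */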
static int is_eth_port_of_netdev(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
               (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

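/* Filter: @rdma_ndev is currently an inactive slave of its bond master. */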
static int is_eth_port_inactive_slave(struct ib_device *ib_dev, u8 port,
                                      struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        int res;

        if (!rdma_ndev)
                return 0;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
                BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

static int pass_all_filter(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        return 1;
}

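/*
 * Filter: the event netdev (@cookie) is @rdma_ndev itself or a device
 * stacked above it (e.g. a VLAN or bond on top of the RDMA netdev).
 */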
static int upper_device_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        int res;

        if (!rdma_ndev)
                return 0;

        if (rdma_ndev == cookie)
                return 1;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

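/*
 * Set the default GIDs of @port unless @rdma_ndev is unrelated to the
 * event netdev or is currently an inactive bonding slave, in which
 * case the bond's active slave owns the default GIDs.
 */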
static void enum_netdev_default_gids(struct ib_device *ib_dev,
                                     u8 port, struct net_device *event_ndev,
                                     struct net_device *rdma_ndev)
{
        unsigned long gid_type_mask;

        rcu_read_lock();
        if (!rdma_ndev ||
            ((rdma_ndev != event_ndev &&
              !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
             is_eth_active_slave_of_bonding_rcu(rdma_ndev,
                                                netdev_master_upper_dev_get_rcu(rdma_ndev)) ==
             BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

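/*
 * Delete the default GIDs of @port if @rdma_ndev is an inactive slave
 * of the bond the event netdev belongs to; an active slave keeps its
 * default GIDs.
 */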
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *event_ndev,
                                            struct net_device *rdma_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                unsigned long gid_type_mask;

                rcu_read_unlock();

                gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

                ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                             gid_type_mask,
                                             IB_CACHE_GID_DEFAULT_MODE_DELETE);
        } else {
                rcu_read_unlock();
        }
}

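/*
 * Snapshot the IPv4 addresses of @ndev into a local list under RCU,
 * then add a GID for each address. The snapshot keeps the GID table
 * updates out of the atomic RCU read-side section.
 */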
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head        list;
                struct sockaddr_in      ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;

        LIST_HEAD(sin_list);
        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

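/*
 * IPv6 counterpart of enum_netdev_ipv4_ips(): snapshot the addresses
 * of @ndev under the inet6_dev lock, then add the GIDs outside of it.
 */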
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head        list;
                struct sockaddr_in6     sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid    gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        enum_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        for_each_net(net)
                for_each_netdev(net, ndev)
                        if (is_eth_port_of_netdev(ib_dev, port, rdma_ndev, ndev))
                                add_netdev_ips(ib_dev, port, rdma_ndev, ndev);
        rtnl_unlock();
}

/*
 * Rescan all of the network devices in the system and add their GIDs,
 * as needed, to the relevant RoCE devices.
 */
int roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);

        return 0;
}

static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        update_gid(parsed->gid_op, device,
                   port, &parsed->gid,
                   &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

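/*
 * Invoke @handle_netdev on the event netdev (@cookie) and on every
 * device stacked above it. The upper devices are collected under RCU
 * with a reference held, then handled and released outside the lock.
 */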
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, master_ndev,
                                                rdma_ndev);
                dev_put(master_ndev);
        }
}

static void del_netdev_default_ips(struct ib_device *ib_dev, u8 port,
                                   struct net_device *rdma_ndev, void *cookie)
{
        bond_delete_netdev_default_gids(ib_dev, port, cookie, rdma_ndev);
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

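/*
 * Copy @cmds into a work item, defaulting any unset ndev/filter_ndev
 * fields to @ndev, take a reference on each device and queue the work
 * on ib_wq; netdevice_event_work_handler() drops the references.
 */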
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(ib_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb = add_netdev_ips, .filter = is_eth_port_of_netdev};
static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb = add_netdev_upper_ips, .filter = is_eth_port_of_netdev};

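/*
 * Build the command list for a NETDEV_CHANGEUPPER event: on unlink,
 * drop the GIDs that belonged to the upper device and re-add the
 * netdev's own GIDs; on link, drop the default GIDs of ports that
 * just became inactive bond slaves and add GIDs for the IPs of the
 * upper device and the devices above it.
 */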
static void netdevice_event_changeupper(struct netdev_notifier_changeupper_info *changeupper_info,
                                        struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = is_eth_port_inactive_slave};

        if (!changeupper_info->linking) {
                cmds[0] = upper_ips_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd;
        } else {
                cmds[0] = bonding_default_del_cmd;
                cmds[0].ndev = changeupper_info->upper_dev;
                cmds[1] = add_cmd_upper_ips;
                cmds[1].ndev = changeupper_info->upper_dev;
                cmds[1].filter_ndev = changeupper_info->upper_dev;
        }
}

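/*
 * Netdevice notifier callback: translate each netdev event into a
 * short list of filter/callback commands and queue them for execution
 * in process context via netdevice_queue_work().
 */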
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb = del_netdev_default_ips_join, .filter = is_eth_port_inactive_slave};
        static const struct netdev_event_work_cmd default_del_cmd = {
                .cb = del_netdev_default_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = default_del_cmd;
                cmds[1] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                cmds[1] = bonding_default_del_cmd_join;
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev, work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

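/*
 * Common handler for the inetaddr/inet6addr notifiers: map NETDEV_UP
 * and NETDEV_DOWN on an Ethernet netdev to a GID add/delete and queue
 * the update to run from the ib_wq workqueue.
 */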
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev   = ndev;

        queue_work(ib_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in      in;
        struct net_device       *ndev;
        struct in_ifaddr        *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6     in6;
        struct net_device       *ndev;
        struct inet6_ifaddr     *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all existing
         * devices in the system. Register to this notifier last to
         * make sure we will not miss any IP add/del callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all GID deletion tasks complete before we go down,
         * to avoid any reference to freed memory. By the time ib-core
         * is removed, all physical devices have been removed, so there
         * is no issue with remaining hardware contexts.
         */
}