RDMA/core: create struct ib_port_cache
drivers/infiniband/core/cache.c
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Intel Corporation. All rights reserved.
4  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/module.h>
37 #include <linux/errno.h>
38 #include <linux/slab.h>
39 #include <linux/workqueue.h>
40 #include <linux/netdevice.h>
41 #include <net/addrconf.h>
42
43 #include <rdma/ib_cache.h>
44
45 #include "core_priv.h"
46
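/* Per-port P_Key cache: table_len entries stored in a flexible array,
 * refreshed from the device by ib_cache_update().
 */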
47 struct ib_pkey_cache {
48         int             table_len;
49         u16             table[0];
50 };
51
52 struct ib_update_work {
53         struct work_struct work;
54         struct ib_device  *device;
55         u8                 port_num;
56 };
57
58 union ib_gid zgid;
59 EXPORT_SYMBOL(zgid);
60
61 static const struct ib_gid_attr zattr;
62
63 enum gid_attr_find_mask {
64         GID_ATTR_FIND_MASK_GID          = 1UL << 0,
65         GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
66         GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
67         GID_ATTR_FIND_MASK_GID_TYPE     = 1UL << 3,
68 };
69
70 enum gid_table_entry_props {
71         GID_TABLE_ENTRY_INVALID         = 1UL << 0,
72         GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
73 };
74
75 enum gid_table_write_action {
76         GID_TABLE_WRITE_ACTION_ADD,
77         GID_TABLE_WRITE_ACTION_DEL,
78         /* MODIFY only updates the GID table. Currently only used by
79          * ib_cache_update.
80          */
81         GID_TABLE_WRITE_ACTION_MODIFY
82 };
83
84 struct ib_gid_table_entry {
85         unsigned long       props;
86         union ib_gid        gid;
87         struct ib_gid_attr  attr;
88         void               *context;
89 };
90
91 struct ib_gid_table {
92         int                  sz;
93         /* In RoCE, adding a GID to the table requires:
94          * (a) Checking whether the GID already exists.
95          * (b) Finding a free slot.
96          * (c) Writing the new GID.
97          *
98          * Deleting requires a different set of operations:
99          * (a) Finding the GID.
100          * (b) Deleting it.
101          *
102          * Add/delete must be carried out atomically, so
103          * writers serialize on this mutex. The lock is not
104          * needed for IB, since the MAD layer replaces all
105          * entries at once. All data_vec entries are
106          * protected by this lock.
107          */
108         struct mutex         lock;
109         /* This lock protects the table entries from being
110          * read and written simultaneously.
111          */
112         rwlock_t             rwlock;
113         struct ib_gid_table_entry *data_vec;
114 };
115
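/* Notify consumers that the GID table of @port has changed. The event
 * is only dispatched for ports whose GID table is managed by the core
 * (RoCE).
 */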
116 static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
117 {
118         if (rdma_cap_roce_gid_table(ib_dev, port)) {
119                 struct ib_event event;
120
121                 event.device            = ib_dev;
122                 event.element.port_num  = port;
123                 event.event             = IB_EVENT_GID_CHANGE;
124
125                 ib_dispatch_event(&event);
126         }
127 }
128
129 static const char * const gid_type_str[] = {
130         [IB_GID_TYPE_IB]        = "IB/RoCE v1",
131         [IB_GID_TYPE_ROCE_UDP_ENCAP]    = "RoCE v2",
132 };
133
134 const char *ib_cache_gid_type_str(enum ib_gid_type gid_type)
135 {
136         if (gid_type < ARRAY_SIZE(gid_type_str) && gid_type_str[gid_type])
137                 return gid_type_str[gid_type];
138
139         return "Invalid GID type";
140 }
141 EXPORT_SYMBOL(ib_cache_gid_type_str);
142
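/* Parse a GID type name as produced by ib_cache_gid_type_str() (an
 * optional trailing newline is ignored) and return the matching
 * enum ib_gid_type, or -EINVAL if the string is not recognized.
 */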
143 int ib_cache_gid_parse_type_str(const char *buf)
144 {
145         unsigned int i;
146         size_t len;
147         int err = -EINVAL;
148
149         len = strlen(buf);
150         if (len == 0)
151                 return -EINVAL;
152
153         if (buf[len - 1] == '\n')
154                 len--;
155
156         for (i = 0; i < ARRAY_SIZE(gid_type_str); ++i)
157                 if (gid_type_str[i] && !strncmp(buf, gid_type_str[i], len) &&
158                     len == strlen(gid_type_str[i])) {
159                         err = i;
160                         break;
161                 }
162
163         return err;
164 }
165 EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
166
167 /* This function expects the rwlock to be held for writing in all
168  * scenarios, and the (sleepable) table mutex to also be held in RoCE
169  * scenarios.
170  */
171 static int write_gid(struct ib_device *ib_dev, u8 port,
172                      struct ib_gid_table *table, int ix,
173                      const union ib_gid *gid,
174                      const struct ib_gid_attr *attr,
175                      enum gid_table_write_action action,
176                      bool  default_gid)
177         __releases(&table->rwlock) __acquires(&table->rwlock)
178 {
179         int ret = 0;
180         struct net_device *old_net_dev;
181         enum ib_gid_type old_gid_type;
182
183         /* When rdma_cap_roce_gid_table() is true, this function must be
184          * protected by a sleepable lock.
185          */
186
187         if (rdma_cap_roce_gid_table(ib_dev, port)) {
188                 table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
189                 write_unlock_irq(&table->rwlock);
190                 /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
191                  * RoCE providers and thus only updates the cache.
192                  */
193                 if (action == GID_TABLE_WRITE_ACTION_ADD)
194                         ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
195                                               &table->data_vec[ix].context);
196                 else if (action == GID_TABLE_WRITE_ACTION_DEL)
197                         ret = ib_dev->del_gid(ib_dev, port, ix,
198                                               &table->data_vec[ix].context);
199                 write_lock_irq(&table->rwlock);
200         }
201
202         old_net_dev = table->data_vec[ix].attr.ndev;
203         old_gid_type = table->data_vec[ix].attr.gid_type;
204         if (old_net_dev && old_net_dev != attr->ndev)
205                 dev_put(old_net_dev);
206         /* On provider failure or delete, reset the entry to the zero GID */
207         if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
208                 gid = &zgid;
209                 attr = &zattr;
210                 table->data_vec[ix].context = NULL;
211         }
212
213         memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
214         memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
215         if (default_gid) {
216                 table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
217                 if (action == GID_TABLE_WRITE_ACTION_DEL)
218                         table->data_vec[ix].attr.gid_type = old_gid_type;
219         }
220         if (table->data_vec[ix].attr.ndev &&
221             table->data_vec[ix].attr.ndev != old_net_dev)
222                 dev_hold(table->data_vec[ix].attr.ndev);
223
224         table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;
225
226         return ret;
227 }
228
229 static int add_gid(struct ib_device *ib_dev, u8 port,
230                    struct ib_gid_table *table, int ix,
231                    const union ib_gid *gid,
232                    const struct ib_gid_attr *attr,
233                    bool  default_gid) {
234         return write_gid(ib_dev, port, table, ix, gid, attr,
235                          GID_TABLE_WRITE_ACTION_ADD, default_gid);
236 }
237
238 static int modify_gid(struct ib_device *ib_dev, u8 port,
239                       struct ib_gid_table *table, int ix,
240                       const union ib_gid *gid,
241                       const struct ib_gid_attr *attr,
242                       bool  default_gid) {
243         return write_gid(ib_dev, port, table, ix, gid, attr,
244                          GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
245 }
246
247 static int del_gid(struct ib_device *ib_dev, u8 port,
248                    struct ib_gid_table *table, int ix,
249                    bool  default_gid) {
250         return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
251                          GID_TABLE_WRITE_ACTION_DEL, default_gid);
252 }
253
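/* Scan @table for an entry matching @gid/@val under @mask, skipping
 * entries flagged GID_TABLE_ENTRY_INVALID. If @pempty is non-NULL, the
 * index of the first completely empty slot is stored there. Returns
 * the matching index, or -1 if nothing matches.
 */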
254 /* rwlock should be read locked */
255 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
256                     const struct ib_gid_attr *val, bool default_gid,
257                     unsigned long mask, int *pempty)
258 {
259         int i = 0;
260         int found = -1;
261         int empty = pempty ? -1 : 0;
262
263         while (i < table->sz && (found < 0 || empty < 0)) {
264                 struct ib_gid_table_entry *data = &table->data_vec[i];
265                 struct ib_gid_attr *attr = &data->attr;
266                 int curr_index = i;
267
268                 i++;
269
270                 if (data->props & GID_TABLE_ENTRY_INVALID)
271                         continue;
272
273                 if (empty < 0)
274                         if (!memcmp(&data->gid, &zgid, sizeof(*gid)) &&
275                             !memcmp(attr, &zattr, sizeof(*attr)) &&
276                             !data->props)
277                                 empty = curr_index;
278
279                 if (found >= 0)
280                         continue;
281
282                 if (mask & GID_ATTR_FIND_MASK_GID_TYPE &&
283                     attr->gid_type != val->gid_type)
284                         continue;
285
286                 if (mask & GID_ATTR_FIND_MASK_GID &&
287                     memcmp(gid, &data->gid, sizeof(*gid)))
288                         continue;
289
290                 if (mask & GID_ATTR_FIND_MASK_NETDEV &&
291                     attr->ndev != val->ndev)
292                         continue;
293
294                 if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
295                     !!(data->props & GID_TABLE_ENTRY_DEFAULT) !=
296                     default_gid)
297                         continue;
298
299                 found = curr_index;
300         }
301
302         if (pempty)
303                 *pempty = empty;
304
305         return found;
306 }
307
308 static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
309 {
310         gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
311         addrconf_ifid_eui48(&gid->raw[8], dev);
312 }
313
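/* Add @gid/@attr to the RoCE GID table of @port. The zero GID is
 * rejected with -EINVAL, and the netdev's default GID may not be added
 * on behalf of another netdev (-EPERM). If an identical entry already
 * exists the call succeeds without change; -ENOSPC is returned when no
 * free slot is left.
 */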
314 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
315                      union ib_gid *gid, struct ib_gid_attr *attr)
316 {
317         struct ib_gid_table *table;
318         int ix;
319         int ret = 0;
320         struct net_device *idev;
321         int empty;
322
323         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
324
325         if (!memcmp(gid, &zgid, sizeof(*gid)))
326                 return -EINVAL;
327
328         if (ib_dev->get_netdev) {
329                 idev = ib_dev->get_netdev(ib_dev, port);
330                 if (idev && attr->ndev != idev) {
331                         union ib_gid default_gid;
332
333                         /* Adding default GIDs is not permitted */
334                         make_default_gid(idev, &default_gid);
335                         if (!memcmp(gid, &default_gid, sizeof(*gid))) {
336                                 dev_put(idev);
337                                 return -EPERM;
338                         }
339                 }
340                 if (idev)
341                         dev_put(idev);
342         }
343
344         mutex_lock(&table->lock);
345         write_lock_irq(&table->rwlock);
346
347         ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
348                       GID_ATTR_FIND_MASK_GID_TYPE |
349                       GID_ATTR_FIND_MASK_NETDEV, &empty);
350         if (ix >= 0)
351                 goto out_unlock;
352
353         if (empty < 0) {
354                 ret = -ENOSPC;
355                 goto out_unlock;
356         }
357
358         ret = add_gid(ib_dev, port, table, empty, gid, attr, false);
359         if (!ret)
360                 dispatch_gid_change_event(ib_dev, port);
361
362 out_unlock:
363         write_unlock_irq(&table->rwlock);
364         mutex_unlock(&table->lock);
365         return ret;
366 }
367
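/* Remove the non-default entry that exactly matches @gid and @attr
 * (GID type and netdev included) from the GID table of @port.
 */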
368 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
369                      union ib_gid *gid, struct ib_gid_attr *attr)
370 {
371         struct ib_gid_table *table;
372         int ix;
373
374         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
375
376         mutex_lock(&table->lock);
377         write_lock_irq(&table->rwlock);
378
379         ix = find_gid(table, gid, attr, false,
380                       GID_ATTR_FIND_MASK_GID      |
381                       GID_ATTR_FIND_MASK_GID_TYPE |
382                       GID_ATTR_FIND_MASK_NETDEV   |
383                       GID_ATTR_FIND_MASK_DEFAULT,
384                       NULL);
385         if (ix < 0)
386                 goto out_unlock;
387
388         if (!del_gid(ib_dev, port, table, ix, false))
389                 dispatch_gid_change_event(ib_dev, port);
390
391 out_unlock:
392         write_unlock_irq(&table->rwlock);
393         mutex_unlock(&table->lock);
394         return 0;
395 }
396
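/* Delete every GID table entry of @port that references @ndev,
 * including default entries, and dispatch a single GID change event if
 * anything was removed.
 */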
397 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
398                                      struct net_device *ndev)
399 {
400         struct ib_gid_table *table;
401         int ix;
402         bool deleted = false;
403
404         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
405
406         mutex_lock(&table->lock);
407         write_lock_irq(&table->rwlock);
408
409         for (ix = 0; ix < table->sz; ix++)
410                 if (table->data_vec[ix].attr.ndev == ndev)
411                         if (!del_gid(ib_dev, port, table, ix,
412                                      !!(table->data_vec[ix].props &
413                                         GID_TABLE_ENTRY_DEFAULT)))
414                                 deleted = true;
415
416         write_unlock_irq(&table->rwlock);
417         mutex_unlock(&table->lock);
418
419         if (deleted)
420                 dispatch_gid_change_event(ib_dev, port);
421
422         return 0;
423 }
424
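/* Read entry @index with the table rwlock held by the caller. Returns
 * -EINVAL for an out-of-range index and -EAGAIN for an entry that is
 * currently being modified; on success a reference is taken on
 * attr->ndev when @attr is provided.
 */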
425 static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
426                               union ib_gid *gid, struct ib_gid_attr *attr)
427 {
428         struct ib_gid_table *table;
429
430         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
431
432         if (index < 0 || index >= table->sz)
433                 return -EINVAL;
434
435         if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
436                 return -EAGAIN;
437
438         memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
439         if (attr) {
440                 memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
441                 if (attr->ndev)
442                         dev_hold(attr->ndev);
443         }
444
445         return 0;
446 }
447
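/* Search the GID tables of all ports for an entry matching @gid/@val
 * under @mask; on success the port number and table index are returned
 * through @port and @index.
 */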
448 static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
449                                     const union ib_gid *gid,
450                                     const struct ib_gid_attr *val,
451                                     unsigned long mask,
452                                     u8 *port, u16 *index)
453 {
454         struct ib_gid_table *table;
455         u8 p;
456         int local_index;
457         unsigned long flags;
458
459         for (p = 0; p < ib_dev->phys_port_cnt; p++) {
460                 table = ib_dev->cache.ports[p].gid;
461                 read_lock_irqsave(&table->rwlock, flags);
462                 local_index = find_gid(table, gid, val, false, mask, NULL);
463                 if (local_index >= 0) {
464                         if (index)
465                                 *index = local_index;
466                         if (port)
467                                 *port = p + rdma_start_port(ib_dev);
468                         read_unlock_irqrestore(&table->rwlock, flags);
469                         return 0;
470                 }
471                 read_unlock_irqrestore(&table->rwlock, flags);
472         }
473
474         return -ENOENT;
475 }
476
477 static int ib_cache_gid_find(struct ib_device *ib_dev,
478                              const union ib_gid *gid,
479                              enum ib_gid_type gid_type,
480                              struct net_device *ndev, u8 *port,
481                              u16 *index)
482 {
483         unsigned long mask = GID_ATTR_FIND_MASK_GID |
484                              GID_ATTR_FIND_MASK_GID_TYPE;
485         struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
486
487         if (ndev)
488                 mask |= GID_ATTR_FIND_MASK_NETDEV;
489
490         return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
491                                         mask, port, index);
492 }
493
494 int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
495                                const union ib_gid *gid,
496                                enum ib_gid_type gid_type,
497                                u8 port, struct net_device *ndev,
498                                u16 *index)
499 {
500         int local_index;
501         struct ib_gid_table *table;
502         unsigned long mask = GID_ATTR_FIND_MASK_GID |
503                              GID_ATTR_FIND_MASK_GID_TYPE;
504         struct ib_gid_attr val = {.ndev = ndev, .gid_type = gid_type};
505         unsigned long flags;
506
507         if (port < rdma_start_port(ib_dev) ||
508             port > rdma_end_port(ib_dev))
509                 return -ENOENT;
510
511         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
512
513         if (ndev)
514                 mask |= GID_ATTR_FIND_MASK_NETDEV;
515
516         read_lock_irqsave(&table->rwlock, flags);
517         local_index = find_gid(table, gid, &val, false, mask, NULL);
518         if (local_index >= 0) {
519                 if (index)
520                         *index = local_index;
521                 read_unlock_irqrestore(&table->rwlock, flags);
522                 return 0;
523         }
524
525         read_unlock_irqrestore(&table->rwlock, flags);
526         return -ENOENT;
527 }
528 EXPORT_SYMBOL(ib_find_cached_gid_by_port);
529
530 /**
531  * ib_cache_gid_find_by_filter - Returns the GID table index where a
532  * specified GID value occurs
533  * @ib_dev: The device to query.
534  * @gid: The GID value to search for.
535  * @port: The port number of the device on which to search for the GID
536  *   value.
537  * @filter: The filter function is executed on every matching GID in the
538  *   table. If it returns true, the corresponding index is returned;
539  *   otherwise the search continues. It is guaranteed that while filter
540  *   runs, the ndev field is valid and the structure won't change. filter
541  *   is executed in an atomic context and must not be NULL.
542  * @context: Opaque pointer passed to the filter function.
543  * @index: The index into the cached GID table where the GID was found.
544  *   This parameter may be NULL.
545  *
546  * ib_cache_gid_find_by_filter() searches the port's GID table for the
547  * specified GID value for which the filter function returns true.
548  * This function is only supported on RoCE ports.
549  */
550 static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
551                                        const union ib_gid *gid,
552                                        u8 port,
553                                        bool (*filter)(const union ib_gid *,
554                                                       const struct ib_gid_attr *,
555                                                       void *),
556                                        void *context,
557                                        u16 *index)
558 {
559         struct ib_gid_table *table;
560         unsigned int i;
561         unsigned long flags;
562         bool found = false;
563
564
565         if (port < rdma_start_port(ib_dev) ||
566             port > rdma_end_port(ib_dev) ||
567             !rdma_protocol_roce(ib_dev, port))
568                 return -EPROTONOSUPPORT;
569
570         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
571
572         read_lock_irqsave(&table->rwlock, flags);
573         for (i = 0; i < table->sz; i++) {
574                 struct ib_gid_attr attr;
575
576                 if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
577                         goto next;
578
579                 if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
580                         goto next;
581
582                 memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
583
584                 if (filter(gid, &attr, context))
585                         found = true;
586
587 next:
588                 if (found)
589                         break;
590         }
591         read_unlock_irqrestore(&table->rwlock, flags);
592
593         if (!found)
594                 return -ENOENT;
595
596         if (index)
597                 *index = i;
598         return 0;
599 }
600
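/* Allocate a GID table with @sz zero-initialized entries. */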
601 static struct ib_gid_table *alloc_gid_table(int sz)
602 {
603         struct ib_gid_table *table =
604                 kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
605
606         if (!table)
607                 return NULL;
608
609         table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
610         if (!table->data_vec)
611                 goto err_free_table;
612
613         mutex_init(&table->lock);
614
615         table->sz = sz;
616         rwlock_init(&table->rwlock);
617
618         return table;
619
620 err_free_table:
621         kfree(table);
622         return NULL;
623 }
624
625 static void release_gid_table(struct ib_gid_table *table)
626 {
627         if (table) {
628                 kfree(table->data_vec);
629                 kfree(table);
630         }
631 }
632
633 static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
634                                    struct ib_gid_table *table)
635 {
636         int i;
637         bool deleted = false;
638
639         if (!table)
640                 return;
641
642         write_lock_irq(&table->rwlock);
643         for (i = 0; i < table->sz; ++i) {
644                 if (memcmp(&table->data_vec[i].gid, &zgid,
645                            sizeof(table->data_vec[i].gid)))
646                         if (!del_gid(ib_dev, port, table, i,
647                                      table->data_vec[i].props &
648                                      GID_TABLE_ENTRY_DEFAULT))
649                                 deleted = true;
650         }
651         write_unlock_irq(&table->rwlock);
652
653         if (deleted)
654                 dispatch_gid_change_event(ib_dev, port);
655 }
656
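/* Set or clear the default GID of @port for every GID type enabled in
 * @gid_type_mask. The default GID is the link-local GID derived from
 * @ndev's MAC address; each enabled GID type owns one reserved slot in
 * the table. In SET mode a stale slot is rewritten, in DELETE mode it
 * is cleared.
 */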
657 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
658                                   struct net_device *ndev,
659                                   unsigned long gid_type_mask,
660                                   enum ib_cache_gid_default_mode mode)
661 {
662         union ib_gid gid;
663         struct ib_gid_attr gid_attr;
664         struct ib_gid_attr zattr_type = zattr;
665         struct ib_gid_table *table;
666         unsigned int gid_type;
667
668         table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
669
670         make_default_gid(ndev, &gid);
671         memset(&gid_attr, 0, sizeof(gid_attr));
672         gid_attr.ndev = ndev;
673
674         for (gid_type = 0; gid_type < IB_GID_TYPE_SIZE; ++gid_type) {
675                 int ix;
676                 union ib_gid current_gid;
677                 struct ib_gid_attr current_gid_attr = {};
678
679                 if (1UL << gid_type & ~gid_type_mask)
680                         continue;
681
682                 gid_attr.gid_type = gid_type;
683
684                 mutex_lock(&table->lock);
685                 write_lock_irq(&table->rwlock);
686                 ix = find_gid(table, NULL, &gid_attr, true,
687                               GID_ATTR_FIND_MASK_GID_TYPE |
688                               GID_ATTR_FIND_MASK_DEFAULT,
689                               NULL);
690
691                 /* Couldn't find the default GID location */
692                 if (WARN_ON(ix < 0))
693                         goto release;
694
695                 zattr_type.gid_type = gid_type;
696
697                 if (!__ib_cache_gid_get(ib_dev, port, ix,
698                                         &current_gid, &current_gid_attr) &&
699                     mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
700                     !memcmp(&gid, &current_gid, sizeof(gid)) &&
701                     !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
702                         goto release;
703
704                 if (memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
705                     memcmp(&current_gid_attr, &zattr_type,
706                            sizeof(current_gid_attr))) {
707                         if (del_gid(ib_dev, port, table, ix, true)) {
708                                 pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
709                                         ix, gid.raw);
710                                 goto release;
711                         } else {
712                                 dispatch_gid_change_event(ib_dev, port);
713                         }
714                 }
715
716                 if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
717                         if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
718                                 pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
719                                         gid.raw);
720                         else
721                                 dispatch_gid_change_event(ib_dev, port);
722                 }
723
724 release:
725                 if (current_gid_attr.ndev)
726                         dev_put(current_gid_attr.ndev);
727                 write_unlock_irq(&table->rwlock);
728                 mutex_unlock(&table->lock);
729         }
730 }
731
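/* Reserve the first entries of @table as default-GID slots, one per
 * RoCE GID type supported on @port, and record the GID type each slot
 * will hold.
 */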
732 static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
733                                      struct ib_gid_table *table)
734 {
735         unsigned int i;
736         unsigned long roce_gid_type_mask;
737         unsigned int num_default_gids;
738         unsigned int current_gid = 0;
739
740         roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
741         num_default_gids = hweight_long(roce_gid_type_mask);
742         for (i = 0; i < num_default_gids && i < table->sz; i++) {
743                 struct ib_gid_table_entry *entry =
744                         &table->data_vec[i];
745
746                 entry->props |= GID_TABLE_ENTRY_DEFAULT;
747                 current_gid = find_next_bit(&roce_gid_type_mask,
748                                             BITS_PER_LONG,
749                                             current_gid);
750                 entry->attr.gid_type = current_gid++;
751         }
752
753         return 0;
754 }
755
756 static int _gid_table_setup_one(struct ib_device *ib_dev)
757 {
758         u8 port;
759         struct ib_gid_table *table;
760         int err = 0;
761
762         for (port = 0; port < ib_dev->phys_port_cnt; port++) {
763                 u8 rdma_port = port + rdma_start_port(ib_dev);
764
765                 table =
766                         alloc_gid_table(
767                                 ib_dev->port_immutable[rdma_port].gid_tbl_len);
768                 if (!table) {
769                         err = -ENOMEM;
770                         goto rollback_table_setup;
771                 }
772
773                 err = gid_table_reserve_default(ib_dev,
774                                                 port + rdma_start_port(ib_dev),
775                                                 table);
776                 if (err)
777                         goto rollback_table_setup;
778                 ib_dev->cache.ports[port].gid = table;
779         }
780
781         return 0;
782
783 rollback_table_setup:
784         for (port = 0; port < ib_dev->phys_port_cnt; port++) {
785                 table = ib_dev->cache.ports[port].gid;
786
787                 cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
788                                        table);
789                 release_gid_table(table);
790         }
791
792         return err;
793 }
794
795 static void gid_table_release_one(struct ib_device *ib_dev)
796 {
797         struct ib_gid_table *table;
798         u8 port;
799
800         for (port = 0; port < ib_dev->phys_port_cnt; port++) {
801                 table = ib_dev->cache.ports[port].gid;
802                 release_gid_table(table);
803                 ib_dev->cache.ports[port].gid = NULL;
804         }
805 }
806
807 static void gid_table_cleanup_one(struct ib_device *ib_dev)
808 {
809         struct ib_gid_table *table;
810         u8 port;
811
812         for (port = 0; port < ib_dev->phys_port_cnt; port++) {
813                 table = ib_dev->cache.ports[port].gid;
814                 cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
815                                        table);
816         }
817 }
818
819 static int gid_table_setup_one(struct ib_device *ib_dev)
820 {
821         int err;
822
823         err = _gid_table_setup_one(ib_dev);
824
825         if (err)
826                 return err;
827
828         err = roce_rescan_device(ib_dev);
829
830         if (err) {
831                 gid_table_cleanup_one(ib_dev);
832                 gid_table_release_one(ib_dev);
833         }
834
835         return err;
836 }
837
838 int ib_get_cached_gid(struct ib_device *device,
839                       u8                port_num,
840                       int               index,
841                       union ib_gid     *gid,
842                       struct ib_gid_attr *gid_attr)
843 {
844         int res;
845         unsigned long flags;
846         struct ib_gid_table *table;
847
848         if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
849                 return -EINVAL;
850
851         table = device->cache.ports[port_num - rdma_start_port(device)].gid;
852         read_lock_irqsave(&table->rwlock, flags);
853         res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
854         read_unlock_irqrestore(&table->rwlock, flags);
855
856         return res;
857 }
858 EXPORT_SYMBOL(ib_get_cached_gid);
859
860 int ib_find_cached_gid(struct ib_device *device,
861                        const union ib_gid *gid,
862                        enum ib_gid_type gid_type,
863                        struct net_device *ndev,
864                        u8               *port_num,
865                        u16              *index)
866 {
867         return ib_cache_gid_find(device, gid, gid_type, ndev, port_num, index);
868 }
869 EXPORT_SYMBOL(ib_find_cached_gid);
870
871 int ib_find_gid_by_filter(struct ib_device *device,
872                           const union ib_gid *gid,
873                           u8 port_num,
874                           bool (*filter)(const union ib_gid *gid,
875                                          const struct ib_gid_attr *,
876                                          void *),
877                           void *context, u16 *index)
878 {
879         /* Only RoCE GID table supports filter function */
880         if (!rdma_cap_roce_gid_table(device, port_num) && filter)
881                 return -EPROTONOSUPPORT;
882
883         return ib_cache_gid_find_by_filter(device, gid,
884                                            port_num, filter,
885                                            context, index);
886 }
887 EXPORT_SYMBOL(ib_find_gid_by_filter);
888
889 int ib_get_cached_pkey(struct ib_device *device,
890                        u8                port_num,
891                        int               index,
892                        u16              *pkey)
893 {
894         struct ib_pkey_cache *cache;
895         unsigned long flags;
896         int ret = 0;
897
898         if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
899                 return -EINVAL;
900
901         read_lock_irqsave(&device->cache.lock, flags);
902
903         cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
904
905         if (index < 0 || index >= cache->table_len)
906                 ret = -EINVAL;
907         else
908                 *pkey = cache->table[index];
909
910         read_unlock_irqrestore(&device->cache.lock, flags);
911
912         return ret;
913 }
914 EXPORT_SYMBOL(ib_get_cached_pkey);
915
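/* Look up @pkey in the cached P_Key table of @port_num, comparing only
 * the low 15 bits. A full-member entry (bit 15 set) is preferred; a
 * limited-member match is returned only if no full-member entry
 * exists.
 */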
916 int ib_find_cached_pkey(struct ib_device *device,
917                         u8                port_num,
918                         u16               pkey,
919                         u16              *index)
920 {
921         struct ib_pkey_cache *cache;
922         unsigned long flags;
923         int i;
924         int ret = -ENOENT;
925         int partial_ix = -1;
926
927         if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
928                 return -EINVAL;
929
930         read_lock_irqsave(&device->cache.lock, flags);
931
932         cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
933
934         *index = -1;
935
936         for (i = 0; i < cache->table_len; ++i)
937                 if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
938                         if (cache->table[i] & 0x8000) {
939                                 *index = i;
940                                 ret = 0;
941                                 break;
942                         } else
943                                 partial_ix = i;
944                 }
945
946         if (ret && partial_ix >= 0) {
947                 *index = partial_ix;
948                 ret = 0;
949         }
950
951         read_unlock_irqrestore(&device->cache.lock, flags);
952
953         return ret;
954 }
955 EXPORT_SYMBOL(ib_find_cached_pkey);
956
957 int ib_find_exact_cached_pkey(struct ib_device *device,
958                               u8                port_num,
959                               u16               pkey,
960                               u16              *index)
961 {
962         struct ib_pkey_cache *cache;
963         unsigned long flags;
964         int i;
965         int ret = -ENOENT;
966
967         if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
968                 return -EINVAL;
969
970         read_lock_irqsave(&device->cache.lock, flags);
971
972         cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
973
974         *index = -1;
975
976         for (i = 0; i < cache->table_len; ++i)
977                 if (cache->table[i] == pkey) {
978                         *index = i;
979                         ret = 0;
980                         break;
981                 }
982
983         read_unlock_irqrestore(&device->cache.lock, flags);
984
985         return ret;
986 }
987 EXPORT_SYMBOL(ib_find_exact_cached_pkey);
988
989 int ib_get_cached_lmc(struct ib_device *device,
990                       u8                port_num,
991                       u8                *lmc)
992 {
993         unsigned long flags;
994         int ret = 0;
995
996         if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
997                 return -EINVAL;
998
999         read_lock_irqsave(&device->cache.lock, flags);
1000         *lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
1001         read_unlock_irqrestore(&device->cache.lock, flags);
1002
1003         return ret;
1004 }
1005 EXPORT_SYMBOL(ib_get_cached_lmc);
1006
1007 int ib_get_cached_port_state(struct ib_device   *device,
1008                              u8                  port_num,
1009                              enum ib_port_state *port_state)
1010 {
1011         unsigned long flags;
1012         int ret = 0;
1013
1014         if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
1015                 return -EINVAL;
1016
1017         read_lock_irqsave(&device->cache.lock, flags);
1018         *port_state = device->cache.ports[port_num
1019                 - rdma_start_port(device)].port_state;
1020         read_unlock_irqrestore(&device->cache.lock, flags);
1021
1022         return ret;
1023 }
1024 EXPORT_SYMBOL(ib_get_cached_port_state);
1025
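/* Re-read the port attributes, the P_Key table and, for ports without
 * a core-managed RoCE GID table, the GID table from the device, then
 * swap the new contents into the cache under cache.lock.
 */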
1026 static void ib_cache_update(struct ib_device *device,
1027                             u8                port)
1028 {
1029         struct ib_port_attr       *tprops = NULL;
1030         struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
1031         struct ib_gid_cache {
1032                 int             table_len;
1033                 union ib_gid    table[0];
1034         }                         *gid_cache = NULL;
1035         int                        i;
1036         int                        ret;
1037         struct ib_gid_table       *table;
1038         bool                       use_roce_gid_table =
1039                                         rdma_cap_roce_gid_table(device, port);
1040
1041         if (port < rdma_start_port(device) || port > rdma_end_port(device))
1042                 return;
1043
1044         table = device->cache.ports[port - rdma_start_port(device)].gid;
1045
1046         tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
1047         if (!tprops)
1048                 return;
1049
1050         ret = ib_query_port(device, port, tprops);
1051         if (ret) {
1052                 pr_warn("ib_query_port failed (%d) for %s\n",
1053                         ret, device->name);
1054                 goto err;
1055         }
1056
1057         pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
1058                              sizeof *pkey_cache->table, GFP_KERNEL);
1059         if (!pkey_cache)
1060                 goto err;
1061
1062         pkey_cache->table_len = tprops->pkey_tbl_len;
1063
1064         if (!use_roce_gid_table) {
1065                 gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
1066                             sizeof(*gid_cache->table), GFP_KERNEL);
1067                 if (!gid_cache)
1068                         goto err;
1069
1070                 gid_cache->table_len = tprops->gid_tbl_len;
1071         }
1072
1073         for (i = 0; i < pkey_cache->table_len; ++i) {
1074                 ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
1075                 if (ret) {
1076                         pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
1077                                 ret, device->name, i);
1078                         goto err;
1079                 }
1080         }
1081
1082         if (!use_roce_gid_table) {
1083                 for (i = 0;  i < gid_cache->table_len; ++i) {
1084                         ret = ib_query_gid(device, port, i,
1085                                            gid_cache->table + i, NULL);
1086                         if (ret) {
1087                                 pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
1088                                         ret, device->name, i);
1089                                 goto err;
1090                         }
1091                 }
1092         }
1093
1094         write_lock_irq(&device->cache.lock);
1095
1096         old_pkey_cache = device->cache.ports[port -
1097                 rdma_start_port(device)].pkey;
1098
1099         device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
1100         if (!use_roce_gid_table) {
1101                 write_lock(&table->rwlock);
1102                 for (i = 0; i < gid_cache->table_len; i++) {
1103                         modify_gid(device, port, table, i, gid_cache->table + i,
1104                                    &zattr, false);
1105                 }
1106                 write_unlock(&table->rwlock);
1107         }
1108
1109         device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
1110         device->cache.ports[port - rdma_start_port(device)].port_state =
1111                 tprops->state;
1112
1113         write_unlock_irq(&device->cache.lock);
1114
1115         kfree(gid_cache);
1116         kfree(old_pkey_cache);
1117         kfree(tprops);
1118         return;
1119
1120 err:
1121         kfree(pkey_cache);
1122         kfree(gid_cache);
1123         kfree(tprops);
1124 }
1125
1126 static void ib_cache_task(struct work_struct *_work)
1127 {
1128         struct ib_update_work *work =
1129                 container_of(_work, struct ib_update_work, work);
1130
1131         ib_cache_update(work->device, work->port_num);
1132         kfree(work);
1133 }
1134
1135 static void ib_cache_event(struct ib_event_handler *handler,
1136                            struct ib_event *event)
1137 {
1138         struct ib_update_work *work;
1139
1140         if (event->event == IB_EVENT_PORT_ERR    ||
1141             event->event == IB_EVENT_PORT_ACTIVE ||
1142             event->event == IB_EVENT_LID_CHANGE  ||
1143             event->event == IB_EVENT_PKEY_CHANGE ||
1144             event->event == IB_EVENT_SM_CHANGE   ||
1145             event->event == IB_EVENT_CLIENT_REREGISTER ||
1146             event->event == IB_EVENT_GID_CHANGE) {
1147                 work = kmalloc(sizeof *work, GFP_ATOMIC);
1148                 if (work) {
1149                         INIT_WORK(&work->work, ib_cache_task);
1150                         work->device   = event->device;
1151                         work->port_num = event->element.port_num;
1152                         queue_work(ib_wq, &work->work);
1153                 }
1154         }
1155 }
1156
1157 int ib_cache_setup_one(struct ib_device *device)
1158 {
1159         int p;
1160         int err;
1161
1162         rwlock_init(&device->cache.lock);
1163
1164         device->cache.ports =
1165                 kzalloc(sizeof(*device->cache.ports) *
1166                         (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
1167         if (!device->cache.ports) {
1168                 err = -ENOMEM;
1169                 goto out;
1170         }
1171
1172         err = gid_table_setup_one(device);
1173         if (err)
1174                 goto out;
1175
1176         for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1177                 ib_cache_update(device, p + rdma_start_port(device));
1178
1179         INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
1180                               device, ib_cache_event);
1181         err = ib_register_event_handler(&device->cache.event_handler);
1182         if (err)
1183                 goto err;
1184
1185         return 0;
1186
1187 err:
1188         gid_table_cleanup_one(device);
1189 out:
1190         return err;
1191 }
1192
1193 void ib_cache_release_one(struct ib_device *device)
1194 {
1195         int p;
1196
1197         /*
1198          * The release function frees all the cache elements.
1199          * This function should be called as part of freeing
1200          * all the device's resources when the cache could no
1201          * longer be accessed.
1202          */
1203         for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
1204                 kfree(device->cache.ports[p].pkey);
1205
1206         gid_table_release_one(device);
1207         kfree(device->cache.ports);
1208 }
1209
1210 void ib_cache_cleanup_one(struct ib_device *device)
1211 {
1212         /* The cleanup function unregisters the event handler,
1213          * waits for all in-progress workqueue elements and cleans
1214          * up the GID cache. This function should be called after
1215          * the device was removed from the devices list and all
1216          * clients were removed, so the cache exists but is
1217          * non-functional and shouldn't be updated anymore.
1218          */
1219         ib_unregister_event_handler(&device->cache.event_handler);
1220         flush_workqueue(ib_wq);
1221         gid_table_cleanup_one(device);
1222 }
1223
1224 void __init ib_cache_setup(void)
1225 {
1226         roce_gid_mgmt_init();
1227 }
1228
1229 void __exit ib_cache_cleanup(void)
1230 {
1231         roce_gid_mgmt_cleanup();
1232 }