/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static const struct file_operations neigh_stat_seq_fops;
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
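
/*
 * Illustrative sketch (not part of the original file) of the pattern the
 * locking rules above demand: scans stay under tbl->lock; anything
 * non-trivial happens only after taking a reference and dropping the
 * lock.  The helper name example_poke_entry() is hypothetical.
 */
static void __maybe_unused example_poke_entry(struct neigh_table *tbl,
					      const void *pkey,
					      struct net_device *dev)
{
	/* neigh_lookup() returns the entry with its reference count
	 * already incremented, so the entry cannot be destroyed here.
	 */
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		return;
	/* No table lock held here: safe to call protocol backends,
	 * send packets, etc.
	 */
	neigh_release(n);
}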
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
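
/*
 * Worked example (hypothetical helper, not in the original file): for
 * base = 30 * HZ the expression above yields (prandom_u32() % base) +
 * (base >> 1), i.e. a value in [15 * HZ, 45 * HZ), matching the
 * (1/2)*base ... (3/2)*base interval described in the comment.
 */
static bool __maybe_unused example_reach_time_in_range(void)
{
	unsigned long t = neigh_rand_reach_time(30 * HZ);

	return t >= 15 * HZ && t < 45 * HZ;	/* always true */
}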
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}
static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!atomic_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
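
/*
 * Illustrative sketch (not in the original file): how a caller might
 * flip an entry between the fast and slow path based on its NUD state,
 * taking the write-locked neigh the two helpers above require.
 */
static void __maybe_unused example_refresh_output(struct neighbour *n)
{
	write_lock_bh(&n->lock);
	if (n->nud_state & NUD_CONNECTED)
		neigh_connect(n);	/* n->output = ops->connected_output */
	else
		neigh_suspect(n);	/* n->output = ops->output */
	write_unlock_bh(&n->lock);
}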
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
		NEIGH_VAR(p, MCAST_PROBES));
}
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* This is a very sensitive place. error_report is a complicated
	   routine; in particular, it can hit the same neighbour entry!

	   So we try to be careful and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead)
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				(NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	return err;
}
EXPORT_SYMBOL(neigh_update);
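
/*
 * Hedged usage sketch (not part of the original file): how an ARP-style
 * receive path might confirm an entry after a reply, per the rules
 * documented above neigh_update().  The caller is assumed to hold a
 * reference on n (e.g. from neigh_lookup()), as the comment requires.
 */
static void __maybe_unused example_confirm_entry(struct neighbour *n,
						 const u8 *lladdr)
{
	neigh_update(n, lladdr, NUD_REACHABLE,
		     NEIGH_UPDATE_F_OVERRIDE, 0);
}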
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
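
/*
 * Illustrative caller (not in the original file): re-arm a NUD_FAILED
 * entry so that a late probe response can still update it, taking
 * neigh->lock for writing as the comment above requires.
 */
static void __maybe_unused example_probe_once(struct neighbour *n)
{
	write_lock_bh(&n->lock);
	__neigh_set_probe_once(n);
	write_unlock_bh(&n->lock);
}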
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache *hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
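
/*
 * Sketch (not in the original file) of how the three output methods
 * above are typically wired into a table's neigh_ops: the resolving
 * helper as the default .output and the fast helper as
 * .connected_output, so neigh_connect()/neigh_suspect() can switch
 * n->output between them.  Field values here are illustrative.
 */
static const struct neigh_ops example_generic_ops __maybe_unused = {
	.family			= AF_INET,
	.output			= neigh_resolve_output,
	.connected_output	= neigh_connected_output,
};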
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}
static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;

void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
		neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	RCU_INIT_POINTER(tbl->nht, NULL);

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < tbl->key_len)
		goto out;

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = neigh_update(neigh, NULL, NUD_FAILED,
			   NEIGH_UPDATE_F_OVERRIDE |
			   NEIGH_UPDATE_F_ADMIN,
			   NETLINK_CB(skb).portid);
	neigh_release(neigh);

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < tbl->key_len)
		goto out;
	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			err = 0;
		}
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		neigh = __neigh_lookup_errno(tbl, dst, dev);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~NEIGH_UPDATE_F_OVERRIDE;
	}

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				   NETLINK_CB(skb).portid);
	neigh_release(neigh);

out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}

static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = netdev_master_upper_dev_get(dev);
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && dev->ifindex != filter_idx)
		return true;

	return false;
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tb[NDA_MAX + 1];
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	int filter_master_idx = 0, filter_idx = 0;
	unsigned int flags = NLM_F_MULTI;
	int err;

	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
	if (!err) {
		if (tb[NDA_IFINDEX])
			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);

		if (tb[NDA_MASTER])
			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);

		if (filter_idx || filter_master_idx)
			flags |= NLM_F_DUMP_FILTERED;
	}

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter_idx) ||
			    neigh_master_filtered(n->dev, filter_master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	s_t = cb->args[0];

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb);
		else
			err = neigh_dump_table(tbl, skb, cb);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
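
/*
 * Hedged usage sketch (hypothetical callback, not in the original
 * file): drop every entry that nobody else references.  As documented
 * above, the caller of __neigh_for_each_release() must hold tbl->lock
 * as a writer with BH disabled; a non-zero return from the callback
 * tells it to unlink and release the entry.
 */
static int __maybe_unused example_release_if_unused(struct neighbour *n)
{
	return atomic_read(&n->refcnt) == 1;
}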
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		neigh = __neigh_lookup_noref(tbl, addr, dev);
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	} else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}

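/* Iteration protocol for the combined walk above: *pos == 0 maps to
 * SEQ_START_TOKEN, positions 1..N walk the neighbour hash table, and
 * positions beyond the last neighbour continue into the proxy (pneigh)
 * list unless NEIGH_SEQ_NEIGH_ONLY is set. The helpers decrement the
 * remaining offset as they skip entries, so a zero remainder means the
 * requested entry was found.
 */
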
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);

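/* A protocol wires these into its own seq_file operations. A minimal
 * sketch (illustrative, arp-style; assumes seq->private is a
 * struct neigh_seq_state):
 *
 *	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * neigh_seq_stop() must then be used as the .stop callback so the RCU BH
 * read lock taken in neigh_seq_start() is always dropped.
 */
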
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
	/* Nothing to release: the per-CPU counters are read locklessly. */
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}

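/* Using MAX_ADDR_LEN for both NDA_DST and NDA_LLADDR makes this a
 * worst-case estimate; the actual payloads depend on tbl->key_len and
 * dev->addr_len, which are not known here. Overestimating is harmless,
 * which is why neigh_fill_info() hitting -EMSGSIZE against this size is
 * treated as a bug below.
 */
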
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);

#ifdef CONFIG_SYSCTL
static int zero;
static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = &zero;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}

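/* Worked example (sizes are illustrative; SKB_TRUESIZE() adds the
 * aligned sk_buff and skb_shared_info overhead to the frame length):
 * writing unres_qlen=3 stores 3 * SKB_TRUESIZE(ETH_FRAME_LEN) into the
 * underlying unres_qlen_bytes value, and reading divides back by the
 * same factor, rounding down. The two sysctls therefore always move
 * together.
 */
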
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}

static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}

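/* Example of the propagation rule above: writing to
 * /proc/sys/net/ipv4/neigh/default/gc_stale_time updates every device
 * whose gc_stale_time was never set explicitly (its data_state bit is
 * clear), while devices with an explicit per-device value keep it.
 */
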
static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void __user *buffer,
					   size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = &zero;
	tmp.extra2 = &int_max;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);

int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void __user *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void __user *buffer,
					      size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void __user *buffer,
				   size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void __user *buffer,
					  size_t *lenp, loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise the change will
		 * only take effect the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}

#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)

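/* For reference, NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit")
 * expands to roughly:
 *
 *	[NEIGH_VAR_APP_PROBES] = {
 *		.procname	= "app_solicit",
 *		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_APP_PROBES),
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= neigh_proc_dointvec_zero_intmax,
 *	}
 *
 * Note that .data initially holds an offset into struct neigh_parms; it
 * is rebased onto a real parms pointer in neigh_sysctl_register().
 */
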
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= &zero,
			.extra2		= &int_max,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* These handlers update p->reachable_time after
		 * base_reachable_time(_ms) is set, so the new timer starts
		 * being applied after the next neighbour update instead of
		 * waiting for neigh_periodic_work to recompute it (which
		 * can take multiple minutes). Any handler that replaces
		 * them should do the same.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);

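/* A minimal caller sketch (illustrative; real callers live in the
 * per-protocol code): registering per-device parameters with the stock
 * handlers,
 *
 *	err = neigh_sysctl_register(dev, p, NULL);
 *
 * creates net/<family>/neigh/<devname>/ under /proc/sys, while passing
 * a non-NULL handler lets the protocol override the four *time entries.
 */
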
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif /* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);