/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)
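
/* flow_cache backs struct sw_flow allocations (sized in ovs_flow_init()
 * to include the per-CPU array of flow_stats pointers), while
 * flow_stats_cache backs the individual flow_stats nodes.
 */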

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}
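
/* Typical use, as in masked_flow_lookup() below: build a masked copy of
 * a packet key with 'full' false, then hash and compare only within the
 * mask's range:
 *
 *	struct sw_flow_key masked_key;
 *
 *	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
 *	hash = flow_hash(&masked_key, &mask->range);
 */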

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);
	RCU_INIT_POINTER(flow->stats[0], stats);
	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}
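
/* Bucket counts must stay a power of two: find_bucket() selects a
 * bucket with 'hash & (n_buckets - 1)'.  TBL_MIN_BUCKETS is a power of
 * two and table_instance_expand() only ever doubles, so this holds.
 * Each instance also gets its own random hash_seed, so a rehash
 * redistributes flows across buckets.
 */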

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking: this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	table_instance_destroy(ti, ufid_ti, false);
}
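
/* The dump iterator below is resumable: '*bucket' and '*last' form a
 * cursor identifying the next bucket and the next entry within it, so
 * a caller can fetch flows in batches across successive calls.
 */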

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
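
/* Each flow embeds two hlist_node slots per table (node[0]/node[1]).
 * Copying links every flow into the new instance under the flipped
 * node_ver while the old instance stays intact for concurrent readers;
 * 'keep_flows' then tells table_instance_destroy() not to free the
 * flows when the old instance is torn down.
 */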

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}
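
/* When no tunnel is in use (key->tun_proto == 0), the fields before
 * 'phy' need not be compared: flow_key_start() below returns the offset
 * of the first interesting byte, rounded down to a long boundary so
 * that cmp_key() can walk the key in long-sized strides.
 */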

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}
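
/* A lookup probes the table once per mask: the key is masked, hashed
 * and compared within 'mask->range' only.  '*n_mask_hit' reports how
 * many masks were tried, which callers can use to gauge lookup cost.
 */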

struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	/* Always called under ovs-mutex. */
	list_for_each_entry(mask, &tbl->mask_list, list) {
		flow = masked_flow_lookup(ti, match->key, mask);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match))
			return flow;
	}
	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			kfree_rcu(mask, rcu);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
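
/* Mask sharing keeps the mask list short: flows with an identical mask
 * share one refcounted sw_flow_mask, so lookup cost in
 * ovs_flow_tbl_lookup_stats() scales with the number of distinct masks
 * rather than the number of flows.
 */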

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}
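
/* The key table grows when it averages more than one flow per bucket,
 * and is reallocated every REHASH_INTERVAL even at constant size, which
 * redistributes flows across buckets under a fresh hash_seed.
 */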

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}
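
/* Note the ordering in ovs_flow_tbl_insert(): the mask is installed
 * first so that flow_key_insert() can hash within flow->mask->range;
 * the UFID table is only populated for flows that carry a UFID.
 */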

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}