netfilter: x_tables: don't use seqlock when fetching old counters
author Florian Westphal <fw@strlen.de>
Wed, 11 Oct 2017 23:13:51 +0000 (01:13 +0200)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Tue, 24 Oct 2017 16:01:50 +0000 (18:01 +0200)
After the previous commit, xt_replace_table will wait until all CPUs
have an even seqcount (i.e., no CPU is accessing the old ruleset).

Add an 'old' counter retrieval version that doesn't synchronize counters.
The synchronization is not needed: the old counters are no longer in use
at this point.

This speeds up table replacement on busy systems with large tables
(and many cores).

Cc: Dan Williams <dcbw@redhat.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
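
For context, the previous commit referenced above makes xt_replace_table spin,
after publishing the new table pointer, until every CPU's xt_recseq seqcount is
even, i.e. no CPU can still be traversing the old ruleset or bumping its
per-cpu counters. A rough sketch of that barrier follows (not the verbatim
upstream code; the helper name is made up for illustration):

#include <linux/netfilter/x_tables.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/* Wait until no CPU is inside an x_tables read section anymore.
 * xt_recseq is the existing per-cpu seqcount used by the packet path;
 * an odd value means that CPU is currently walking a ruleset.
 */
static void xt_wait_for_old_rules(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);
		u32 seq = raw_read_seqcount(s);

		if (seq & 1) {
			/* CPU is mid-traversal; wait for the count to move on. */
			do {
				cond_resched();
				cpu_relax();
			} while (seq == raw_read_seqcount(s));
		}
	}
}

Once that wait has completed, the old xt_table_info can only be touched by the
replace path itself, which is why the get_old_counters() helper added below can
sum the per-cpu counters without any seqcount handshake.
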
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv6/netfilter/ip6_tables.c

diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 9e2770fd00be5eda1a633893c42b59378ab52f17..f88221aebc9d7b61cf2c09f2b3d2351c4095f64f 100644
@@ -634,6 +634,25 @@ static void get_counters(const struct xt_table_info *t,
        }
 }
 
+static void get_old_counters(const struct xt_table_info *t,
+                            struct xt_counters counters[])
+{
+       struct arpt_entry *iter;
+       unsigned int cpu, i;
+
+       for_each_possible_cpu(cpu) {
+               i = 0;
+               xt_entry_foreach(iter, t->entries, t->size) {
+                       struct xt_counters *tmp;
+
+                       tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
+                       ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
+                       ++i;
+               }
+               cond_resched();
+       }
+}
+
 static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
@@ -910,8 +929,7 @@ static int __do_replace(struct net *net, const char *name,
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
 
-       /* Get the old counters, and synchronize with replace */
-       get_counters(oldinfo, counters);
+       get_old_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
        loc_cpu_old_entry = oldinfo->entries;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 39286e543ee683fee9ee7d302d860ec3ee0ea104..4cbe5e80f3bf079755cd08f33c24a4077c6c4a63 100644
@@ -781,6 +781,26 @@ get_counters(const struct xt_table_info *t,
        }
 }
 
+static void get_old_counters(const struct xt_table_info *t,
+                            struct xt_counters counters[])
+{
+       struct ipt_entry *iter;
+       unsigned int cpu, i;
+
+       for_each_possible_cpu(cpu) {
+               i = 0;
+               xt_entry_foreach(iter, t->entries, t->size) {
+                       const struct xt_counters *tmp;
+
+                       tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
+                       ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
+                       ++i; /* macro does multi eval of i */
+               }
+
+               cond_resched();
+       }
+}
+
 static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
@@ -1070,8 +1090,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
 
-       /* Get the old counters, and synchronize with replace */
-       get_counters(oldinfo, counters);
+       get_old_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
        xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 01bd3ee5ebc685f1c1735dfe375633d49c2c5437..f06e25065a342e361d7ae68ae1d60304b3f43f39 100644
@@ -800,6 +800,25 @@ get_counters(const struct xt_table_info *t,
        }
 }
 
+static void get_old_counters(const struct xt_table_info *t,
+                            struct xt_counters counters[])
+{
+       struct ip6t_entry *iter;
+       unsigned int cpu, i;
+
+       for_each_possible_cpu(cpu) {
+               i = 0;
+               xt_entry_foreach(iter, t->entries, t->size) {
+                       const struct xt_counters *tmp;
+
+                       tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
+                       ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
+                       ++i;
+               }
+               cond_resched();
+       }
+}
+
 static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
@@ -1090,8 +1109,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
            (newinfo->number <= oldinfo->initial_entries))
                module_put(t->me);
 
-       /* Get the old counters, and synchronize with replace */
-       get_counters(oldinfo, counters);
+       get_old_counters(oldinfo, counters);
 
        /* Decrease module usage counts and free resource */
        xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
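
For contrast, the get_counters() helper that these files keep using for live
tables reads each per-cpu bcnt/pcnt pair inside a read_seqcount_begin()/
read_seqcount_retry() loop, so a packet concurrently updating the counters is
never observed half-way through. A rough sketch of that reader, modelled on
the ip_tables.c variant (not copied verbatim; the _sketch suffix only marks it
as an illustration):

/* Synchronized counter read used while a table is still live. */
static void get_counters_sketch(const struct xt_table_info *t,
				struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				/* Retry if the owning CPU touched this
				 * ruleset while we were reading the pair.
				 */
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
		cond_resched();
	}
}

get_old_counters() is essentially this loop with the seqcount handshake
removed, which is safe only because the table has already been unhooked and
every CPU has been observed with an even xt_recseq. The '/* macro does multi
eval of i */' comment in the ip_tables.c hunk refers to ADD_COUNTER()
expanding its destination argument (counters[i]) more than once, so the
increment must stay outside the macro invocation.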