diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 128245efe84abec57723f2c26262c90d7b3e27e8..9889d52eda8203a5e247ef2e5d2c477f05721e6d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 
-struct flow_offload_entry {
-       struct flow_offload     flow;
-       struct nf_conn          *ct;
-       struct rcu_head         rcu_head;
-};
-
 static DEFINE_MUTEX(flowtable_lock);
 static LIST_HEAD(flowtables);
 
 static void
-flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
-                     struct nf_flow_route *route,
+flow_offload_fill_dir(struct flow_offload *flow,
                      enum flow_offload_tuple_dir dir)
 {
        struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
-       struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
-       struct dst_entry *other_dst = route->tuple[!dir].dst;
-       struct dst_entry *dst = route->tuple[dir].dst;
+       struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;
 
        ft->dir = dir;
 
@@ -39,12 +30,10 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
        case NFPROTO_IPV4:
                ft->src_v4 = ctt->src.u3.in;
                ft->dst_v4 = ctt->dst.u3.in;
-               ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
                break;
        case NFPROTO_IPV6:
                ft->src_v6 = ctt->src.u3.in6;
                ft->dst_v6 = ctt->dst.u3.in6;
-               ft->mtu = ip6_dst_mtu_forward(dst);
                break;
        }
 
@@ -52,37 +41,24 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
        ft->l4proto = ctt->dst.protonum;
        ft->src_port = ctt->src.u.tcp.port;
        ft->dst_port = ctt->dst.u.tcp.port;
-
-       ft->iifidx = other_dst->dev->ifindex;
-       ft->dst_cache = dst;
 }
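
With this change, flow_offload_fill_dir() copies only what the conntrack entry itself provides; the route-derived fields move to the new flow_offload_fill_route() further down. In terms of struct flow_offload_tuple, the split looks roughly like this (a sketch assembled from the fields this patch touches; types and layout approximate include/net/netfilter/nf_flow_table.h rather than quoting it):

    struct flow_offload_tuple {
            /* filled by flow_offload_fill_dir() from the conntrack tuple */
            union {
                    struct in_addr          src_v4;
                    struct in6_addr         src_v6;
            };
            union {
                    struct in_addr          dst_v4;
                    struct in6_addr         dst_v6;
            };
            __be16                  src_port, dst_port;
            u8                      l3proto, l4proto, dir;

            /* filled by flow_offload_fill_route() from struct nf_flow_route */
            u16                     mtu;            /* per-direction path MTU */
            int                     iifidx;         /* from the other direction's dst device */
            struct dst_entry        *dst_cache;     /* dst pinned with dst_hold_safe() */
    };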
 
-struct flow_offload *
-flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
 {
-       struct flow_offload_entry *entry;
        struct flow_offload *flow;
 
        if (unlikely(nf_ct_is_dying(ct) ||
            !atomic_inc_not_zero(&ct->ct_general.use)))
                return NULL;
 
-       entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
-       if (!entry)
+       flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+       if (!flow)
                goto err_ct_refcnt;
 
-       flow = &entry->flow;
+       flow->ct = ct;
 
-       if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
-               goto err_dst_cache_original;
-
-       if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
-               goto err_dst_cache_reply;
-
-       entry->ct = ct;
-
-       flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
-       flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
+       flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
+       flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);
 
        if (ct->status & IPS_SRC_NAT)
                flow->flags |= FLOW_OFFLOAD_SNAT;
@@ -91,10 +67,6 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
 
        return flow;
 
-err_dst_cache_reply:
-       dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
-err_dst_cache_original:
-       kfree(entry);
 err_ct_refcnt:
        nf_ct_put(ct);
 
@@ -102,6 +74,56 @@ flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
 }
 EXPORT_SYMBOL_GPL(flow_offload_alloc);
 
+static int flow_offload_fill_route(struct flow_offload *flow,
+                                  const struct nf_flow_route *route,
+                                  enum flow_offload_tuple_dir dir)
+{
+       struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+       struct dst_entry *other_dst = route->tuple[!dir].dst;
+       struct dst_entry *dst = route->tuple[dir].dst;
+
+       if (!dst_hold_safe(route->tuple[dir].dst))
+               return -1;
+
+       switch (flow_tuple->l3proto) {
+       case NFPROTO_IPV4:
+               flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
+               break;
+       case NFPROTO_IPV6:
+               flow_tuple->mtu = ip6_dst_mtu_forward(dst);
+               break;
+       }
+
+       flow_tuple->iifidx = other_dst->dev->ifindex;
+       flow_tuple->dst_cache = dst;
+
+       return 0;
+}
+
+int flow_offload_route_init(struct flow_offload *flow,
+                           const struct nf_flow_route *route)
+{
+       int err;
+
+       err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
+       if (err < 0)
+               return err;
+
+       err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
+       if (err < 0)
+               goto err_route_reply;
+
+       flow->type = NF_FLOW_OFFLOAD_ROUTE;
+
+       return 0;
+
+err_route_reply:
+       dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(flow_offload_route_init);
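
Allocation is now a two-step affair: flow_offload_alloc() pins the conntrack entry and fills both tuples, while flow_offload_route_init() takes the dst references and stamps the flow as NF_FLOW_OFFLOAD_ROUTE. A caller (roughly what the nft flow offload expression does) strings the pieces together like this; the error labels are illustrative:

    flow = flow_offload_alloc(ct);
    if (!flow)
            goto err_flow_alloc;

    if (flow_offload_route_init(flow, &route) < 0)
            goto err_flow_free;

    if (flow_offload_add(flowtable, flow) < 0)
            goto err_flow_free;

    return 0;

    err_flow_free:
            flow_offload_free(flow);  /* puts the ct; releases dsts only for ROUTE flows */
    err_flow_alloc:
            return -1;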
+
 static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 {
        tcp->state = TCP_CONNTRACK_ESTABLISHED;
@@ -150,17 +172,25 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
        flow_offload_fixup_ct_timeout(ct);
 }
 
-void flow_offload_free(struct flow_offload *flow)
+static void flow_offload_route_release(struct flow_offload *flow)
 {
-       struct flow_offload_entry *e;
-
        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
-       e = container_of(flow, struct flow_offload_entry, flow);
+}
+
+void flow_offload_free(struct flow_offload *flow)
+{
+       switch (flow->type) {
+       case NF_FLOW_OFFLOAD_ROUTE:
+               flow_offload_route_release(flow);
+               break;
+       default:
+               break;
+       }
        if (flow->flags & FLOW_OFFLOAD_DYING)
-               nf_ct_delete(e->ct, 0, 0);
-       nf_ct_put(e->ct);
-       kfree_rcu(e, rcu_head);
+               nf_ct_delete(flow->ct, 0, 0);
+       nf_ct_put(flow->ct);
+       kfree_rcu(flow, rcu_head);
 }
 EXPORT_SYMBOL_GPL(flow_offload_free);
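
With struct flow_offload_entry gone, the conntrack pointer, the rcu_head and the new type selector live directly in struct flow_offload, which is what lets every container_of() below disappear. The resulting layout is approximately the following (an assumption: the header change belongs to this series but is not part of this file's diff, so field widths may differ):

    enum flow_offload_type {
            NF_FLOW_OFFLOAD_UNSPEC  = 0,
            NF_FLOW_OFFLOAD_ROUTE,
    };

    struct flow_offload {
            struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
            struct nf_conn                  *ct;       /* was flow_offload_entry::ct */
            u16                             flags;
            u16                             type;      /* enum flow_offload_type */
            u32                             timeout;
            struct rcu_head                 rcu_head;  /* was flow_offload_entry::rcu_head */
    };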
 
@@ -220,6 +250,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
                return err;
        }
 
+       if (flow_table->flags & NF_FLOWTABLE_HW_OFFLOAD)
+               nf_flow_offload_add(flow_table, flow);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
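
The hardware path is opt-in per table: nf_flow_offload_add() only runs when the flowtable carries NF_FLOWTABLE_HW_OFFLOAD. Setting that flag happens outside this file; as an illustrative sketch (not from this patch), a table created with the nftables "flags offload" option would be brought up along these lines:

    flowtable->flags |= NF_FLOWTABLE_HW_OFFLOAD;    /* e.g. nft "flags offload" */

    err = nf_flow_table_init(flowtable);
    if (err < 0)
            return err;
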
@@ -232,8 +265,6 @@ static inline bool nf_flow_has_expired(const struct flow_offload *flow)
 static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
 {
-       struct flow_offload_entry *e;
-
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
@@ -241,25 +272,21 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);
 
-       e = container_of(flow, struct flow_offload_entry, flow);
-       clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+       clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
 
        if (nf_flow_has_expired(flow))
-               flow_offload_fixup_ct(e->ct);
+               flow_offload_fixup_ct(flow->ct);
        else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
-               flow_offload_fixup_ct_timeout(e->ct);
+               flow_offload_fixup_ct_timeout(flow->ct);
 
        flow_offload_free(flow);
 }
 
 void flow_offload_teardown(struct flow_offload *flow)
 {
-       struct flow_offload_entry *e;
-
        flow->flags |= FLOW_OFFLOAD_TEARDOWN;
 
-       e = container_of(flow, struct flow_offload_entry, flow);
-       flow_offload_fixup_ct_state(e->ct);
+       flow_offload_fixup_ct_state(flow->ct);
 }
 EXPORT_SYMBOL_GPL(flow_offload_teardown);
 
@@ -269,7 +296,6 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
 {
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload *flow;
-       struct flow_offload_entry *e;
        int dir;
 
        tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
@@ -282,8 +308,7 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
        if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
                return NULL;
 
-       e = container_of(flow, struct flow_offload_entry, flow);
-       if (unlikely(nf_ct_is_dying(e->ct)))
+       if (unlikely(nf_ct_is_dying(flow->ct)))
                return NULL;
 
        return tuplehash;
@@ -327,12 +352,21 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
 static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
        struct nf_flowtable *flow_table = data;
-       struct flow_offload_entry *e;
 
-       e = container_of(flow, struct flow_offload_entry, flow);
-       if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
-           (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
-               flow_offload_del(flow_table, flow);
+       if (flow->flags & FLOW_OFFLOAD_HW)
+               nf_flow_offload_stats(flow_table, flow);
+
+       if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
+           (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) {
+               if (flow->flags & FLOW_OFFLOAD_HW) {
+                       if (!(flow->flags & FLOW_OFFLOAD_HW_DYING))
+                               nf_flow_offload_del(flow_table, flow);
+                       else if (flow->flags & FLOW_OFFLOAD_HW_DEAD)
+                               flow_offload_del(flow_table, flow);
+               } else {
+                       flow_offload_del(flow_table, flow);
+               }
+       }
 }
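
The gc step now also drives hardware teardown in two passes: the first pass queues the asynchronous hardware removal and leaves the flow marked FLOW_OFFLOAD_HW_DYING; only after the offload work has completed, presumably setting FLOW_OFFLOAD_HW_DEAD, does a later pass free the software entry. Sketched as a comment:

    /*
     * Implied lifecycle of a hardware-offloaded flow (sketch):
     *
     *   HW             --gc: nf_flow_offload_del()-->  HW | HW_DYING
     *   HW | HW_DYING  --offload work completes----->  HW | HW_DEAD
     *   HW | HW_DEAD   --gc: flow_offload_del()----->  freed
     */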
 
 static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -465,6 +499,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
        int err;
 
        INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+       flow_block_init(&flowtable->flow_block);
 
        err = rhashtable_init(&flowtable->rhashtable,
                              &nf_flow_offload_rhash_params);
@@ -485,15 +520,13 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
 static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 {
        struct net_device *dev = data;
-       struct flow_offload_entry *e;
-
-       e = container_of(flow, struct flow_offload_entry, flow);
 
        if (!dev) {
                flow_offload_teardown(flow);
                return;
        }
-       if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
+
+       if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                flow_offload_dead(flow);
@@ -502,6 +535,7 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
 static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
                                          struct net_device *dev)
 {
+       nf_flow_table_offload_flush(flowtable);
        nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
        flush_delayed_work(&flowtable->gc_work);
 }
@@ -529,5 +563,18 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
 
+static int __init nf_flow_table_module_init(void)
+{
+       return nf_flow_table_offload_init();
+}
+
+static void __exit nf_flow_table_module_exit(void)
+{
+       nf_flow_table_offload_exit();
+}
+
+module_init(nf_flow_table_module_init);
+module_exit(nf_flow_table_module_exit);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");