netfilter: connlimit: use kmem_cache for conn objects
author Florian Westphal <fw@strlen.de>
Fri, 7 Mar 2014 13:37:12 +0000 (14:37 +0100)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Wed, 12 Mar 2014 12:55:03 +0000 (13:55 +0100)
We might allocate thousands of these (one object per connection).
Use a distinct kmem cache to permit simple tracking of how many
objects are currently in use by the connlimit match via sysfs.

Reviewed-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
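
For readers unfamiliar with the slab API, the pattern the patch adopts is sketched below. This is a minimal illustration, not the patch itself: the cache name "xt_connlimit_conn" and the struct name come from the diff, while the placeholder struct body, the conn_alloc()/conn_free() helpers and the module hooks are invented here for the sketch. With SLUB as the allocator, a dedicated cache like this becomes visible in /proc/slabinfo and under /sys/kernel/slab/xt_connlimit_conn/, which is the tracking the commit message refers to.

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/list.h>

    /* Placeholder standing in for the real struct defined in
     * net/netfilter/xt_connlimit.c (which also carries a conntrack
     * tuple and an address); only the shape matters for this sketch. */
    struct xt_connlimit_conn {
            struct hlist_node node;
            /* ... tuple and addr fields omitted ... */
    };

    static struct kmem_cache *connlimit_conn_cachep __read_mostly;

    /* Hypothetical helpers showing the per-object calls that replace
     * kmalloc()/kfree() in the patch; GFP_ATOMIC because the match
     * runs in packet-processing (softirq) context. */
    static struct xt_connlimit_conn *conn_alloc(void)
    {
            return kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
    }

    static void conn_free(struct xt_connlimit_conn *conn)
    {
            kmem_cache_free(connlimit_conn_cachep, conn);
    }

    static int __init sketch_init(void)
    {
            /* One cache dedicated to conn objects, created once at load time. */
            connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
                                               sizeof(struct xt_connlimit_conn),
                                               0, 0, NULL);
            if (!connlimit_conn_cachep)
                    return -ENOMEM;
            return 0;
    }

    static void __exit sketch_exit(void)
    {
            /* Every object must have been freed back before this call. */
            kmem_cache_destroy(connlimit_conn_cachep);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");

Once the cache exists, reading the xt_connlimit_conn line in /proc/slabinfo (or the files under /sys/kernel/slab/xt_connlimit_conn/ on SLUB kernels) gives the current object count without any extra accounting inside the match itself.
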
net/netfilter/xt_connlimit.c

index 0220d406cbe0a35e278fad9c0c6b695763896896..a8eaabb03be93eba205435fbe0fd05dd4c4783b2 100644
@@ -44,6 +44,7 @@ struct xt_connlimit_data {
 };
 
 static u_int32_t connlimit_rnd __read_mostly;
+static struct kmem_cache *connlimit_conn_cachep __read_mostly;
 
 static inline unsigned int connlimit_iphash(__be32 addr)
 {
@@ -113,7 +114,7 @@ static int count_hlist(struct net *net,
                                                 &conn->tuple);
                if (found == NULL) {
                        hlist_del(&conn->node);
-                       kfree(conn);
+                       kmem_cache_free(connlimit_conn_cachep, conn);
                        continue;
                }
 
@@ -133,7 +134,7 @@ static int count_hlist(struct net *net,
                         */
                        nf_ct_put(found_ct);
                        hlist_del(&conn->node);
-                       kfree(conn);
+                       kmem_cache_free(connlimit_conn_cachep, conn);
                        continue;
                }
 
@@ -152,7 +153,9 @@ static bool add_hlist(struct hlist_head *head,
                      const struct nf_conntrack_tuple *tuple,
                      const union nf_inet_addr *addr)
 {
-       struct xt_connlimit_conn *conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
+       struct xt_connlimit_conn *conn;
+
+       conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
        if (conn == NULL)
                return false;
        conn->tuple = *tuple;
@@ -285,7 +288,7 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
        for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
                hlist_for_each_entry_safe(conn, n, &hash[i], node) {
                        hlist_del(&conn->node);
-                       kfree(conn);
+                       kmem_cache_free(connlimit_conn_cachep, conn);
                }
        }
 
@@ -305,12 +308,23 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
 
 static int __init connlimit_mt_init(void)
 {
-       return xt_register_match(&connlimit_mt_reg);
+       int ret;
+       connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
+                                          sizeof(struct xt_connlimit_conn),
+                                          0, 0, NULL);
+       if (!connlimit_conn_cachep)
+               return -ENOMEM;
+
+       ret = xt_register_match(&connlimit_mt_reg);
+       if (ret != 0)
+               kmem_cache_destroy(connlimit_conn_cachep);
+       return ret;
 }
 
 static void __exit connlimit_mt_exit(void)
 {
        xt_unregister_match(&connlimit_mt_reg);
+       kmem_cache_destroy(connlimit_conn_cachep);
 }
 
 module_init(connlimit_mt_init);