net: Replace ip_ra_lock with per-net mutex
Author:     Kirill Tkhai <ktkhai@virtuozzo.com>
AuthorDate: Thu, 22 Mar 2018 09:45:40 +0000 (12:45 +0300)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Thu, 22 Mar 2018 19:12:56 +0000 (15:12 -0400)
Since ra_chain is per-net, we can use a per-net mutex
to protect it in ip_ra_control(). This improves
scalability: writers in different net namespaces no
longer contend on a single global lock.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/netns/ipv4.h
net/core/net_namespace.c
net/ipv4/ip_sockglue.c
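
For context, the reader side that this locking scheme pairs with is
ip_call_ra_chain() in net/ipv4/ip_input.c, which the patch does not touch:
it walks ra_chain under rcu_read_lock() and never takes ip_ra_lock, so the
lock only has to serialize writers against each other. That is what makes
the switch from a global spinlock to a per-net, sleepable mutex safe here:
ip_ra_control() runs in process context via setsockopt(), and no softirq
or reader path acquires the lock. A heavily simplified sketch of that
reader (delivery details such as defragmentation and skb cloning omitted;
not the literal kernel code):

    /* Simplified sketch of ip_call_ra_chain(). Runs under
     * rcu_read_lock() on the packet receive path; takes no lock,
     * which is why only writers need the (per-net) mutex.
     */
    bool ip_call_ra_chain(struct sk_buff *skb)
    {
            struct net *net = dev_net(skb->dev);
            struct ip_ra_chain *ra;

            for (ra = rcu_dereference(net->ipv4.ra_chain); ra;
                 ra = rcu_dereference(ra->next)) {
                    struct sock *sk = ra->sk; /* NULL once unregistered */

                    if (sk && inet_sk(sk)->inet_num == ip_hdr(skb)->protocol)
                            raw_rcv(sk, skb); /* real code delivers a clone */
            }
            return false;
    }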

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 97d7ee6667c74fcff829bcb61fe7b02963bcb741..8491bc9c86b1553ab603e4363e8e38ca7ff547e0 100644
@@ -50,6 +50,7 @@ struct netns_ipv4 {
        struct ipv4_devconf     *devconf_all;
        struct ipv4_devconf     *devconf_dflt;
        struct ip_ra_chain __rcu *ra_chain;
+       struct mutex            ra_mutex;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
        struct fib_rules_ops    *rules_ops;
        bool                    fib_has_custom_rules;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c340d5cfbdecd4dab11a863c988eb24eebaa88d5..95ba2c53bd9a280433959d22d60fa36b784cbe97 100644
@@ -301,6 +301,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
        spin_lock_init(&net->nsid_lock);
+       mutex_init(&net->ipv4.ra_mutex);
 
        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f36d35fe924bb3c85f75ccbaa27f933d2ac83475..5ad2d8ed3a3fe2aa51d814af442df7ff5e074d3e 100644
@@ -322,9 +322,6 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
        return 0;
 }
 
-static DEFINE_SPINLOCK(ip_ra_lock);
-
-
 static void ip_ra_destroy_rcu(struct rcu_head *head)
 {
        struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
@@ -345,21 +342,21 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 
        new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
-       spin_lock_bh(&ip_ra_lock);
+       mutex_lock(&net->ipv4.ra_mutex);
        for (rap = &net->ipv4.ra_chain;
             (ra = rcu_dereference_protected(*rap,
-                       lockdep_is_held(&ip_ra_lock))) != NULL;
+                       lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
             rap = &ra->next) {
                if (ra->sk == sk) {
                        if (on) {
-                               spin_unlock_bh(&ip_ra_lock);
+                               mutex_unlock(&net->ipv4.ra_mutex);
                                kfree(new_ra);
                                return -EADDRINUSE;
                        }
                        /* dont let ip_call_ra_chain() use sk again */
                        ra->sk = NULL;
                        RCU_INIT_POINTER(*rap, ra->next);
-                       spin_unlock_bh(&ip_ra_lock);
+                       mutex_unlock(&net->ipv4.ra_mutex);
 
                        if (ra->destructor)
                                ra->destructor(sk);
@@ -374,7 +371,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
                }
        }
        if (!new_ra) {
-               spin_unlock_bh(&ip_ra_lock);
+               mutex_unlock(&net->ipv4.ra_mutex);
                return -ENOBUFS;
        }
        new_ra->sk = sk;
@@ -383,7 +380,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
        RCU_INIT_POINTER(new_ra->next, ra);
        rcu_assign_pointer(*rap, new_ra);
        sock_hold(sk);
-       spin_unlock_bh(&ip_ra_lock);
+       mutex_unlock(&net->ipv4.ra_mutex);
 
        return 0;
 }
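
For completeness, the writer path patched above is reached from userspace
through the IP_ROUTER_ALERT socket option on a raw IPv4 socket (the classic
user being an RSVP daemon). A minimal usage sketch, assuming a protocol such
as IPPROTO_RSVP and CAP_NET_RAW privileges; error handling kept to a minimum:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            int on = 1;
            int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* This setsockopt() ends up in ip_ra_control(), which now
             * takes the per-net ra_mutex instead of the old global
             * ip_ra_lock. */
            if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0)
                    perror("setsockopt");
            return 0;
    }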