asedeno.scripts.mit.edu Git - linux.git/commitdiff
rxrpc: abstract away knowledge of IDR internals
author Matthew Wilcox <mawilcox@microsoft.com>
Wed, 14 Dec 2016 23:09:19 +0000 (15:09 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 15 Dec 2016 00:04:10 +0000 (16:04 -0800)
Add idr_get_cursor() / idr_set_cursor() APIs, and remove the reference
to IDR_SIZE.

Link: http://lkml.kernel.org/r/1480369871-5271-65-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Tested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/idr.h
net/rxrpc/af_rxrpc.c
net/rxrpc/conn_client.c

index 3639a28188c9218ff35fb5f36aee54778b40350e..1eb755f77f2f2d7dc557f8971c8ccff11f07a1b5 100644 (file)
@@ -55,6 +55,32 @@ struct idr {
 }
 #define DEFINE_IDR(name)       struct idr name = IDR_INIT(name)
 
+/**
+ * idr_get_cursor - Return the current position of the cyclic allocator
+ * @idr: idr handle
+ *
+ * The value returned is the value that will be next returned from
+ * idr_alloc_cyclic() if it is free (otherwise the search will start from
+ * this position).
+ */
+static inline unsigned int idr_get_cursor(struct idr *idr)
+{
+       return READ_ONCE(idr->cur);
+}
+
+/**
+ * idr_set_cursor - Set the current position of the cyclic allocator
+ * @idr: idr handle
+ * @val: new position
+ *
+ * The next call to idr_alloc_cyclic() will return @val if it is free
+ * (otherwise the search will start from this position).
+ */
+static inline void idr_set_cursor(struct idr *idr, unsigned int val)
+{
+       WRITE_ONCE(idr->cur, val);
+}
+
 /**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
index 2d59c9be40e1b53976497f92b28b54acb411e20e..5f63f6dcaabb6422306895f01ff7dc3f0de8ee60 100644 (file)
@@ -762,16 +762,17 @@ static const struct net_proto_family rxrpc_family_ops = {
 static int __init af_rxrpc_init(void)
 {
        int ret = -1;
+       unsigned int tmp;
 
        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
 
        get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
        rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
-       get_random_bytes(&rxrpc_client_conn_ids.cur,
-                        sizeof(rxrpc_client_conn_ids.cur));
-       rxrpc_client_conn_ids.cur &= 0x3fffffff;
-       if (rxrpc_client_conn_ids.cur == 0)
-               rxrpc_client_conn_ids.cur = 1;
+       get_random_bytes(&tmp, sizeof(tmp));
+       tmp &= 0x3fffffff;
+       if (tmp == 0)
+               tmp = 1;
+       idr_set_cursor(&rxrpc_client_conn_ids, tmp);
 
        ret = -ENOMEM;
        rxrpc_call_jar = kmem_cache_create(
index 60ef9605167ef8b79dd850e2cc5932dfc3ed3e3c..6cbcdcc298534a4704f6383c3486ce6b82f432c9 100644 (file)
@@ -263,12 +263,12 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
         * times the maximum number of client conns away from the current
         * allocation point to try and keep the IDs concentrated.
         */
-       id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur);
+       id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
        id = conn->proto.cid >> RXRPC_CIDSHIFT;
        distance = id - id_cursor;
        if (distance < 0)
                distance = -distance;
-       limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4;
+       limit = max(rxrpc_max_client_connections * 4, 1024U);
        if (distance > limit)
                goto mark_dont_reuse;