asedeno.scripts.mit.edu Git - linux.git/commitdiff
greybus: make op_cycle atomic (again)
authorAlex Elder <elder@linaro.org>
Wed, 3 Dec 2014 14:35:08 +0000 (08:35 -0600)
committerGreg Kroah-Hartman <greg@kroah.com>
Wed, 3 Dec 2014 23:05:58 +0000 (15:05 -0800)
There's no need to protect updating a connection's operation id cycle
counter with the operations spinlock. That spinlock protects
connection lists, which do not interact with the cycle counter.
All that we require is that it gets updated atomically, and we
can express that requirement in its type.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: Greg Kroah-Hartman <greg@kroah.com>
drivers/staging/greybus/connection.c
drivers/staging/greybus/connection.h
drivers/staging/greybus/operation.c

index 7fbfcdc22307fe458fee950bfcad1f2191b9065b..e59a7778c02ac93f5bef18a6ae4d1884de5633fb 100644 (file)
@@ -191,6 +191,7 @@ struct gb_connection *gb_connection_create(struct gb_interface *interface,
        list_add_tail(&connection->interface_links, &interface->connections);
        spin_unlock_irq(&gb_connections_lock);
 
+       atomic_set(&connection->op_cycle, 0);
        INIT_LIST_HEAD(&connection->operations);
 
        return connection;
index 035fced12edc3e6edc68bfa2aa9d8f7078074b3f..7568161e5dcb58b181522359b4f709ba67479353 100644 (file)
@@ -35,7 +35,7 @@ struct gb_connection {
 
        enum gb_connection_state        state;
 
-       u16                             op_cycle;
+       atomic_t                        op_cycle;
        struct list_head                operations;
 
        void                            *private;
index 6ed1d479b11720678217cc3537f44e7503709551..15a6e3b24a7c151075f16beb370cdfe7b6d612f8 100644 (file)
@@ -640,6 +640,7 @@ int gb_operation_request_send(struct gb_operation *operation,
        struct gb_connection *connection = operation->connection;
        struct gb_operation_msg_hdr *header;
        unsigned long timeout;
+       unsigned int cycle;
        int ret;
 
        if (connection->state != GB_CONNECTION_STATE_ENABLED)
@@ -661,9 +662,8 @@ int gb_operation_request_send(struct gb_operation *operation,
         * Assign the operation's id, and store it in the request header.
         * Zero is a reserved operation id.
         */
-       spin_lock_irq(&gb_operations_lock);
-       operation->id = ++connection->op_cycle % U16_MAX + 1;
-       spin_unlock_irq(&gb_operations_lock);
+       cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
+       operation->id = (u16)(cycle % U16_MAX + 1);
        header = operation->request->header;
        header->operation_id = cpu_to_le16(operation->id);