1
0
Fork 0

greybus: make op_cycle atomic (again)

There's no need to protect updating a connection's operation-id cycle
counter with the operations spinlock. That spinlock protects
connection lists, which do not interact with the cycle counter.
All that we require is that it gets updated atomically, and we
can express that requirement in its type.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: Greg Kroah-Hartman <greg@kroah.com>
hifive-unleashed-5.1
Alex Elder 2014-12-03 08:35:08 -06:00 committed by Greg Kroah-Hartman
parent afb2e1342e
commit 4afb7fd015
3 changed files with 5 additions and 4 deletions

View File

@ -191,6 +191,7 @@ struct gb_connection *gb_connection_create(struct gb_interface *interface,
list_add_tail(&connection->interface_links, &interface->connections);
spin_unlock_irq(&gb_connections_lock);
atomic_set(&connection->op_cycle, 0);
INIT_LIST_HEAD(&connection->operations);
return connection;

View File

@ -35,7 +35,7 @@ struct gb_connection {
enum gb_connection_state state;
u16 op_cycle;
atomic_t op_cycle;
struct list_head operations;
void *private;

View File

@ -640,6 +640,7 @@ int gb_operation_request_send(struct gb_operation *operation,
struct gb_connection *connection = operation->connection;
struct gb_operation_msg_hdr *header;
unsigned long timeout;
unsigned int cycle;
int ret;
if (connection->state != GB_CONNECTION_STATE_ENABLED)
@ -661,9 +662,8 @@ int gb_operation_request_send(struct gb_operation *operation,
* Assign the operation's id, and store it in the request header.
* Zero is a reserved operation id.
*/
spin_lock_irq(&gb_operations_lock);
operation->id = ++connection->op_cycle % U16_MAX + 1;
spin_unlock_irq(&gb_operations_lock);
cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
operation->id = (u16)(cycle % U16_MAX + 1);
header = operation->request->header;
header->operation_id = cpu_to_le16(operation->id);