1
0
Fork 0

greybus: operation: use per-connection work queues

Replace the global operation work queue with per-connection work queues.

There is no need to keep operations strictly ordered across connections;
enforcing a global ordering only adds unnecessary latency.

Tested-by: Rui Miguel Silva <rui.silva@linaro.org>
Signed-off-by: Johan Hovold <johan@hovoldconsulting.com>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
hifive-unleashed-5.1
Johan Hovold 2015-07-23 10:50:02 +02:00 committed by Greg Kroah-Hartman
parent 10f9fa133a
commit 5a5bc354c6
3 changed files with 19 additions and 17 deletions

View File

@@ -7,6 +7,8 @@
* Released under the GPLv2 only.
*/
#include <linux/workqueue.h>
#include "greybus.h"
static DEFINE_SPINLOCK(gb_connections_lock);
@@ -99,6 +101,7 @@ static void gb_connection_release(struct device *dev)
{
struct gb_connection *connection = to_gb_connection(dev);
destroy_workqueue(connection->wq);
kfree(connection);
}
@@ -190,6 +193,11 @@ gb_connection_create_range(struct greybus_host_device *hd,
spin_lock_init(&connection->lock);
INIT_LIST_HEAD(&connection->operations);
connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
dev_name(parent), cport_id);
if (!connection->wq)
goto err_free_connection;
connection->dev.parent = parent;
connection->dev.bus = &greybus_bus_type;
connection->dev.type = &greybus_connection_type;
@@ -227,6 +235,8 @@ gb_connection_create_range(struct greybus_host_device *hd,
return connection;
err_free_connection:
kfree(connection);
err_remove_ida:
ida_simple_remove(id_map, hd_cport_id);

View File

@@ -39,6 +39,8 @@ struct gb_connection {
enum gb_connection_state state;
struct list_head operations;
struct workqueue_struct *wq;
atomic_t op_cycle;
void *private;

View File

@@ -19,9 +19,6 @@
static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;
/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_workqueue;
/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
@@ -800,7 +797,7 @@ void greybus_message_sent(struct greybus_host_device *hd,
gb_operation_put(operation);
} else if (status) {
if (gb_operation_result_set(operation, status))
queue_work(gb_operation_workqueue, &operation->work);
queue_work(connection->wq, &operation->work);
}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);
@@ -837,7 +834,7 @@ static void gb_connection_recv_request(struct gb_connection *connection,
* request handler returns.
*/
if (gb_operation_result_set(operation, -EINPROGRESS))
queue_work(gb_operation_workqueue, &operation->work);
queue_work(connection->wq, &operation->work);
}
/*
@@ -877,7 +874,7 @@ static void gb_connection_recv_response(struct gb_connection *connection,
/* The rest will be handled in work queue context */
if (gb_operation_result_set(operation, errno)) {
memcpy(message->header, data, size);
queue_work(gb_operation_workqueue, &operation->work);
queue_work(connection->wq, &operation->work);
}
gb_operation_put(operation);
@@ -931,12 +928,14 @@ void gb_connection_recv(struct gb_connection *connection,
*/
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
struct gb_connection *connection = operation->connection;
if (WARN_ON(gb_operation_is_incoming(operation)))
return;
if (gb_operation_result_set(operation, errno)) {
gb_message_cancel(operation->request);
queue_work(gb_operation_workqueue, &operation->work);
queue_work(connection->wq, &operation->work);
}
atomic_inc(&operation->waiters);
@@ -1043,15 +1042,8 @@ int __init gb_operation_init(void)
if (!gb_operation_cache)
goto err_destroy_message_cache;
gb_operation_workqueue = alloc_workqueue("greybus_operation",
WQ_UNBOUND, 1);
if (!gb_operation_workqueue)
goto err_operation;
return 0;
err_operation:
kmem_cache_destroy(gb_operation_cache);
gb_operation_cache = NULL;
err_destroy_message_cache:
kmem_cache_destroy(gb_message_cache);
gb_message_cache = NULL;
@@ -1061,8 +1053,6 @@ err_destroy_message_cache:
void gb_operation_exit(void)
{
destroy_workqueue(gb_operation_workqueue);
gb_operation_workqueue = NULL;
kmem_cache_destroy(gb_operation_cache);
gb_operation_cache = NULL;
kmem_cache_destroy(gb_message_cache);