summary | refs | log | tree | commit | diff
path: root/drivers/staging/greybus/operation.c
diff options
context:
space:
mode:
author    Johan Hovold <johan@hovoldconsulting.com>  2015-07-23 10:50:02 +0200
committer Greg Kroah-Hartman <gregkh@google.com>     2015-07-23 12:55:25 -0700
commit 5a5bc354c65d9e5f255f1861212ec5fa4852a891 (patch)
tree   fc5646c6483bd0f097b71bb8a64cdfc0048195d7 /drivers/staging/greybus/operation.c
parent 10f9fa133a295fa0f2f36c1a481b7e8b82b12891 (diff)
greybus: operation: use per-connection work queues
Replace the global operation work queue with per-connection work queues. There is no need to keep operations strictly ordered across connections, something which only adds unnecessary latency.

Tested-by: Rui Miguel Silva <rui.silva@linaro.org>
Signed-off-by: Johan Hovold <johan@hovoldconsulting.com>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'drivers/staging/greybus/operation.c')
-rw-r--r-- drivers/staging/greybus/operation.c | 24
1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index 6b87bcd63615..69218aa4b7f1 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -19,9 +19,6 @@
static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;
-/* Workqueue to handle Greybus operation completions. */
-static struct workqueue_struct *gb_operation_workqueue;
-
/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
@@ -800,7 +797,7 @@ void greybus_message_sent(struct greybus_host_device *hd,
gb_operation_put(operation);
} else if (status) {
if (gb_operation_result_set(operation, status))
- queue_work(gb_operation_workqueue, &operation->work);
+ queue_work(connection->wq, &operation->work);
}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);
@@ -837,7 +834,7 @@ static void gb_connection_recv_request(struct gb_connection *connection,
* request handler returns.
*/
if (gb_operation_result_set(operation, -EINPROGRESS))
- queue_work(gb_operation_workqueue, &operation->work);
+ queue_work(connection->wq, &operation->work);
}
/*
@@ -877,7 +874,7 @@ static void gb_connection_recv_response(struct gb_connection *connection,
/* The rest will be handled in work queue context */
if (gb_operation_result_set(operation, errno)) {
memcpy(message->header, data, size);
- queue_work(gb_operation_workqueue, &operation->work);
+ queue_work(connection->wq, &operation->work);
}
gb_operation_put(operation);
@@ -931,12 +928,14 @@ void gb_connection_recv(struct gb_connection *connection,
*/
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
+ struct gb_connection *connection = operation->connection;
+
if (WARN_ON(gb_operation_is_incoming(operation)))
return;
if (gb_operation_result_set(operation, errno)) {
gb_message_cancel(operation->request);
- queue_work(gb_operation_workqueue, &operation->work);
+ queue_work(connection->wq, &operation->work);
}
atomic_inc(&operation->waiters);
@@ -1043,15 +1042,8 @@ int __init gb_operation_init(void)
if (!gb_operation_cache)
goto err_destroy_message_cache;
- gb_operation_workqueue = alloc_workqueue("greybus_operation",
- WQ_UNBOUND, 1);
- if (!gb_operation_workqueue)
- goto err_operation;
-
return 0;
-err_operation:
- kmem_cache_destroy(gb_operation_cache);
- gb_operation_cache = NULL;
+
err_destroy_message_cache:
kmem_cache_destroy(gb_message_cache);
gb_message_cache = NULL;
@@ -1061,8 +1053,6 @@ err_destroy_message_cache:
void gb_operation_exit(void)
{
- destroy_workqueue(gb_operation_workqueue);
- gb_operation_workqueue = NULL;
kmem_cache_destroy(gb_operation_cache);
gb_operation_cache = NULL;
kmem_cache_destroy(gb_message_cache);