path: root/drivers/staging/greybus/operation.c
author     Johan Hovold <johan@hovoldconsulting.com>   2015-07-23 10:50:03 +0200
committer  Greg Kroah-Hartman <gregkh@google.com>      2015-07-23 12:55:25 -0700
commit     701615f82f29d0cce28a397267f847de3ee93b6e (patch)
tree       62c834ecdb098d31e62effca7e807aff0513744e /drivers/staging/greybus/operation.c
parent     5a5bc354c65d9e5f255f1861212ec5fa4852a891 (diff)
greybus: operation: add completion work queue
Add a dedicated bound work queue for operation completions and use the
connection work queues for incoming requests only.

There is no need to keep responses ordered internally or with respect to
requests. Instead, allow operations to complete as soon as possible when a
response arrives (or the operation is cancelled).

Note that this also allows synchronous requests to be submitted from request
handlers, as responses will no longer be blocked on the same single-threaded
work queue. Similarly, operations can now also be cancelled from a request
handler.

Tested-by: Rui Miguel Silva <rui.silva@linaro.org>
Signed-off-by: Johan Hovold <johan@hovoldconsulting.com>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
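As a rough illustration of what the dedicated completion queue enables, the
sketch below shows a request handler issuing a synchronous outgoing operation
from handler context. The handler name, its signature and the
GB_EXAMPLE_TYPE_PING operation type are hypothetical and used only for
illustration; gb_operation_sync() is the existing synchronous-request helper.
Because the outgoing operation's completion is now queued on
gb_operation_completion_wq rather than on the connection's own work queue,
blocking in the handler no longer deadlocks the single-threaded connection
queue.

    /* Assumes the greybus staging headers ("greybus.h") are available. */

    /*
     * Hypothetical incoming-request handler, running on the connection's
     * (request) work queue.  The handler name, signature and operation
     * type are illustrative only and not part of this patch.
     */
    static int example_request_handler(struct gb_operation *op)
    {
    	struct gb_connection *connection = op->connection;
    	__le32 response;

    	/*
    	 * Synchronous request from request-handler context: the reply to
    	 * this outgoing operation is completed on the dedicated
    	 * gb_operation_completion_wq, so waiting for it here cannot block
    	 * behind this handler on the connection work queue.
    	 */
    	return gb_operation_sync(connection, GB_EXAMPLE_TYPE_PING,
    				 NULL, 0, &response, sizeof(response));
    }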
Diffstat (limited to 'drivers/staging/greybus/operation.c')
-rw-r--r--  drivers/staging/greybus/operation.c  |  25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
index 69218aa4b7f1..4e9c4a896365 100644
--- a/drivers/staging/greybus/operation.c
+++ b/drivers/staging/greybus/operation.c
@@ -19,6 +19,9 @@
static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;
+/* Workqueue to handle Greybus operation completions. */
+static struct workqueue_struct *gb_operation_completion_wq;
+
/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
@@ -796,8 +799,10 @@ void greybus_message_sent(struct greybus_host_device *hd,
gb_operation_put_active(operation);
gb_operation_put(operation);
} else if (status) {
- if (gb_operation_result_set(operation, status))
- queue_work(connection->wq, &operation->work);
+ if (gb_operation_result_set(operation, status)) {
+ queue_work(gb_operation_completion_wq,
+ &operation->work);
+ }
}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);
@@ -874,7 +879,7 @@ static void gb_connection_recv_response(struct gb_connection *connection,
/* The rest will be handled in work queue context */
if (gb_operation_result_set(operation, errno)) {
memcpy(message->header, data, size);
- queue_work(connection->wq, &operation->work);
+ queue_work(gb_operation_completion_wq, &operation->work);
}
gb_operation_put(operation);
@@ -928,14 +933,12 @@ void gb_connection_recv(struct gb_connection *connection,
*/
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
- struct gb_connection *connection = operation->connection;
-
if (WARN_ON(gb_operation_is_incoming(operation)))
return;
if (gb_operation_result_set(operation, errno)) {
gb_message_cancel(operation->request);
- queue_work(connection->wq, &operation->work);
+ queue_work(gb_operation_completion_wq, &operation->work);
}
atomic_inc(&operation->waiters);
@@ -1042,8 +1045,16 @@ int __init gb_operation_init(void)
if (!gb_operation_cache)
goto err_destroy_message_cache;
+ gb_operation_completion_wq = alloc_workqueue("greybus_completion",
+ 0, 0);
+ if (!gb_operation_completion_wq)
+ goto err_destroy_operation_cache;
+
return 0;
+err_destroy_operation_cache:
+ kmem_cache_destroy(gb_operation_cache);
+ gb_operation_cache = NULL;
err_destroy_message_cache:
kmem_cache_destroy(gb_message_cache);
gb_message_cache = NULL;
@@ -1053,6 +1064,8 @@ err_destroy_message_cache:
void gb_operation_exit(void)
{
+ destroy_workqueue(gb_operation_completion_wq);
+ gb_operation_completion_wq = NULL;
kmem_cache_destroy(gb_operation_cache);
gb_operation_cache = NULL;
kmem_cache_destroy(gb_message_cache);