author	Gabriel Krisman Bertazi <krisman@collabora.com>	2020-01-06 13:58:15 -0500
committer	Martin K. Petersen <martin.petersen@oracle.com>	2020-01-15 22:55:36 -0500
commit	7c1ef338705fb5c53e6f574ae5eb19fdfacb3d26 (patch)
tree	7f06aa8f71974f41bc720375382d1dfb77eb9ff6 /drivers/base
parent	54155ed4199c7aa3fd20866648024ab63c96d579 (diff)
scsi: drivers: base: Support atomic version of attribute_container_device_trigger
attribute_container_device_trigger invokes callbacks that may fail for
one or more classdevs; for instance, the transport_add_class_device
callback, called during transport creation, does memory allocation.
This information, though, is not propagated to upper layers, and any
driver using the attribute_container_device_trigger API will not know
whether any, some, or all callbacks succeeded.

This patch implements a safe version of this dispatcher, which either
succeeds for all the callbacks or reverts to the original state.

Link: https://lore.kernel.org/r/20200106185817.640331-2-krisman@collabora.com
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
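A minimal usage sketch of the new API (not part of this patch; the names
my_setup_classdev(), my_teardown_classdev() and my_add_device() are
hypothetical, only attribute_container_device_trigger_safe() and its
callback signature come from the patch below): a caller hands the safe
trigger a setup callback plus an undo callback and gets all-or-nothing
behaviour across the matching classdevs.

#include <linux/device.h>
#include <linux/attribute_container.h>

static int my_setup_classdev(struct attribute_container *cont,
			     struct device *dev,
			     struct device *classdev)
{
	/* Per-classdev setup that may fail, e.g. a memory allocation. */
	return 0;
}

static int my_teardown_classdev(struct attribute_container *cont,
				struct device *dev,
				struct device *classdev)
{
	/* Release whatever my_setup_classdev() set up for this classdev. */
	return 0;
}

static int my_add_device(struct device *dev)
{
	/*
	 * Either my_setup_classdev() succeeds for every matching classdev,
	 * or the first error is returned and my_teardown_classdev() is run
	 * on the classdevs that had already been set up.
	 */
	return attribute_container_device_trigger_safe(dev, my_setup_classdev,
						       my_teardown_classdev);
}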
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/attribute_container.c	103
1 file changed, 103 insertions, 0 deletions
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 20736aaa0e69..f7bd0f4db13d 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -236,6 +236,109 @@ attribute_container_remove_device(struct device *dev,
 	mutex_unlock(&attribute_container_mutex);
 }
+static int
+do_attribute_container_device_trigger_safe(struct device *dev,
+					   struct attribute_container *cont,
+					   int (*fn)(struct attribute_container *,
+						     struct device *, struct device *),
+					   int (*undo)(struct attribute_container *,
+						       struct device *, struct device *))
+{
+	int ret;
+	struct internal_container *ic, *failed;
+	struct klist_iter iter;
+
+	if (attribute_container_no_classdevs(cont))
+		return fn(cont, dev, NULL);
+
+	klist_for_each_entry(ic, &cont->containers, node, &iter) {
+		if (dev == ic->classdev.parent) {
+			ret = fn(cont, dev, &ic->classdev);
+			if (ret) {
+				failed = ic;
+				klist_iter_exit(&iter);
+				goto fail;
+			}
+		}
+	}
+	return 0;
+
+fail:
+	if (!undo)
+		return ret;
+
+	/* Attempt to undo the work partially done. */
+	klist_for_each_entry(ic, &cont->containers, node, &iter) {
+		if (ic == failed) {
+			klist_iter_exit(&iter);
+			break;
+		}
+		if (dev == ic->classdev.parent)
+			undo(cont, dev, &ic->classdev);
+	}
+	return ret;
+}
+
+/**
+ * attribute_container_device_trigger_safe - execute a trigger for each
+ * matching classdev or fail all of them.
+ *
+ * @dev:  The generic device to run the trigger for
+ * @fn:   the function to execute for each classdev
+ * @undo: a function to undo the work previously done in case of error
+ *
+ * This function is a safe version of
+ * attribute_container_device_trigger. It stops on the first error and
+ * undoes the partial work done on the previous classdevs. It is
+ * guaranteed that either all of them succeed or none of them do.
+ */
+int
+attribute_container_device_trigger_safe(struct device *dev,
+					int (*fn)(struct attribute_container *,
+						  struct device *,
+						  struct device *),
+					int (*undo)(struct attribute_container *,
+						    struct device *,
+						    struct device *))
+{
+	struct attribute_container *cont, *failed = NULL;
+	int ret = 0;
+
+	mutex_lock(&attribute_container_mutex);
+
+	list_for_each_entry(cont, &attribute_container_list, node) {
+
+		if (!cont->match(cont, dev))
+			continue;
+
+		ret = do_attribute_container_device_trigger_safe(dev, cont,
+								 fn, undo);
+		if (ret) {
+			failed = cont;
+			break;
+		}
+	}
+
+	if (ret && !WARN_ON(!undo)) {
+		list_for_each_entry(cont, &attribute_container_list, node) {
+
+			if (failed == cont)
+				break;
+
+			if (!cont->match(cont, dev))
+				continue;
+
+			do_attribute_container_device_trigger_safe(dev, cont,
+								   undo, NULL);
+		}
+	}
+
+	mutex_unlock(&attribute_container_mutex);
+	return ret;
+
+}
+
 /**
  * attribute_container_device_trigger - execute a trigger for each matching classdev
  *