
scsi: drivers: base: Support atomic version of attribute_container_device_trigger

attribute_container_device_trigger invokes callbacks that may fail for one
or more classdevs; for instance, the transport_add_class_device callback,
called during transport creation, does memory allocation.  This
information, though, is not propagated to upper layers, and any driver
using the attribute_container_device_trigger API will not know whether any,
some, or all of the callbacks succeeded.

This patch implements a safe version of this dispatcher, which either
succeeds for all the callbacks or undoes the partial work and reverts to
the original state.
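
For illustration, a driver would pass a setup/undo pair to the new helper.
A minimal sketch (the my_* names are hypothetical and not part of this
patch):

    #include <linux/attribute_container.h>
    #include <linux/device.h>

    static int my_setup_classdev(struct attribute_container *cont,
                                 struct device *dev,
                                 struct device *classdev)
    {
            /* Per-classdev work that may fail, e.g. a memory allocation. */
            return 0;
    }

    static int my_undo_classdev(struct attribute_container *cont,
                                struct device *dev,
                                struct device *classdev)
    {
            /* Release whatever my_setup_classdev acquired for this classdev. */
            return 0;
    }

    static int my_attach(struct device *dev)
    {
            /*
             * Returns 0 only if my_setup_classdev succeeded for every
             * matching classdev; on failure, my_undo_classdev has been
             * run for each classdev that had already been set up.
             */
            return attribute_container_device_trigger_safe(dev,
                                                           my_setup_classdev,
                                                           my_undo_classdev);
    }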

Link: https://lore.kernel.org/r/20200106185817.640331-2-krisman@collabora.com
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Gabriel Krisman Bertazi 2020-01-06 13:58:15 -05:00 committed by Martin K. Petersen
parent 54155ed419
commit 7c1ef33870
2 changed files with 110 additions and 0 deletions

drivers/base/attribute_container.c

@@ -236,6 +236,109 @@ attribute_container_remove_device(struct device *dev,
	mutex_unlock(&attribute_container_mutex);
}

static int
do_attribute_container_device_trigger_safe(struct device *dev,
					   struct attribute_container *cont,
					   int (*fn)(struct attribute_container *,
						     struct device *, struct device *),
					   int (*undo)(struct attribute_container *,
						       struct device *, struct device *))
{
	int ret;
	struct internal_container *ic, *failed;
	struct klist_iter iter;

	if (attribute_container_no_classdevs(cont))
		return fn(cont, dev, NULL);

	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (dev == ic->classdev.parent) {
			ret = fn(cont, dev, &ic->classdev);
			if (ret) {
				failed = ic;
				klist_iter_exit(&iter);
				goto fail;
			}
		}
	}
	return 0;

fail:
	if (!undo)
		return ret;

	/* Attempt to undo the work partially done. */
	klist_for_each_entry(ic, &cont->containers, node, &iter) {
		if (ic == failed) {
			klist_iter_exit(&iter);
			break;
		}
		if (dev == ic->classdev.parent)
			undo(cont, dev, &ic->classdev);
	}
	return ret;
}

/**
 * attribute_container_device_trigger_safe - execute a trigger for each
 * matching classdev or fail all of them.
 *
 * @dev:  The generic device to run the trigger for
 * @fn:   the function to execute for each classdev.
 * @undo: A function to undo the work previously done in case of error
 *
 * This function is a safe version of
 * attribute_container_device_trigger. It stops on the first error and
 * undoes the partial work that has been done on the previous
 * classdevs. It is guaranteed that either they all succeeded, or none
 * of them succeeded.
 */
int
attribute_container_device_trigger_safe(struct device *dev,
					int (*fn)(struct attribute_container *,
						  struct device *,
						  struct device *),
					int (*undo)(struct attribute_container *,
						    struct device *,
						    struct device *))
{
	struct attribute_container *cont, *failed = NULL;
	int ret = 0;

	mutex_lock(&attribute_container_mutex);

	list_for_each_entry(cont, &attribute_container_list, node) {
		if (!cont->match(cont, dev))
			continue;

		ret = do_attribute_container_device_trigger_safe(dev, cont,
								 fn, undo);
		if (ret) {
			failed = cont;
			break;
		}
	}

	if (ret && !WARN_ON(!undo)) {
		list_for_each_entry(cont, &attribute_container_list, node) {
			if (failed == cont)
				break;

			if (!cont->match(cont, dev))
				continue;

			do_attribute_container_device_trigger_safe(dev, cont,
								   undo, NULL);
		}
	}

	mutex_unlock(&attribute_container_mutex);
	return ret;
}

/**
 * attribute_container_device_trigger - execute a trigger for each matching classdev
 *
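
Concretely, suppose dev matches containers A, B, and C, and fn fails on one
of B's classdevs: do_attribute_container_device_trigger_safe first runs undo
for B's classdevs that had already run fn, then the outer loop re-runs the
dispatcher over A with undo as the trigger function (and no further undo),
restoring the original state; C is never touched. If no undo callback was
supplied, the rollback pass is skipped with a WARN_ON.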

include/linux/attribute_container.h

@@ -54,6 +54,13 @@ void attribute_container_device_trigger(struct device *dev,
					int (*fn)(struct attribute_container *,
						  struct device *,
						  struct device *));
int attribute_container_device_trigger_safe(struct device *dev,
					    int (*fn)(struct attribute_container *,
						      struct device *,
						      struct device *),
					    int (*undo)(struct attribute_container *,
							struct device *,
							struct device *));
void attribute_container_trigger(struct device *dev,
				 int (*fn)(struct attribute_container *,
					   struct device *));