
Merge remote-tracking branch 'gregkh-driver-core/driver-core-next' into arm/exynos

hifive-unleashed-5.1
Joerg Roedel 2016-11-14 17:11:20 +01:00
commit c8b88a9075
20 changed files with 1381 additions and 48 deletions

View File

@ -0,0 +1,12 @@
What: /sys/devices/.../deferred_probe
Date: August 2016
Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
Description:
The /sys/devices/.../deferred_probe attribute is
present for all devices. If a driver detects during
probing a device that a related device is not yet
ready, it may defer probing of the first device. The
kernel will retry probing the first device after any
other device is successfully probed. This attribute
reads as 1 if probing of this device is currently
deferred, or 0 otherwise.
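For illustration (not part of this commit), a minimal userspace sketch that reads the attribute; the device path is hypothetical and stands in for any real directory under /sys/devices:

#include <stdio.h>

int main(void)
{
	char buf[4];
	/* Hypothetical device path; substitute a real one. */
	FILE *f = fopen("/sys/devices/platform/example.0/deferred_probe", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("probe deferred: %s", buf);	/* "1" or "0" */
	fclose(f);
	return 0;
}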

View File

@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
this_cpu_ci->cpu_map_populated = true;
return 0;
}

View File

@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
source "drivers/base/test/Kconfig"
config SYS_HYPERVISOR
bool
default n

View File

@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
obj-y += test/
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG

View File

@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
extern void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
extern struct device_attribute dev_attr_deferred_probe;
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
/* Device links support */
extern int device_links_read_lock(void);
extern void device_links_read_unlock(int idx);
extern int device_links_check_suppliers(struct device *dev);
extern void device_links_driver_bound(struct device *dev);
extern void device_links_driver_cleanup(struct device *dev);
extern void device_links_no_driver(struct device *dev);
extern bool device_links_busy(struct device *dev);
extern void device_links_unbind_consumers(struct device *dev);

View File

@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
/* OF properties to query for a given cache type */
struct cache_type_info {
const char *size_prop;
const char *line_size_props[2];
const char *nr_sets_prop;
};
static const struct cache_type_info cache_type_info[] = {
{
.size_prop = "cache-size",
.line_size_props = { "cache-line-size",
"cache-block-size", },
.nr_sets_prop = "cache-sets",
}, {
.size_prop = "i-cache-size",
.line_size_props = { "i-cache-line-size",
"i-cache-block-size", },
.nr_sets_prop = "i-cache-sets",
}, {
.size_prop = "d-cache-size",
.line_size_props = { "d-cache-line-size",
"d-cache-block-size", },
.nr_sets_prop = "d-cache-sets",
},
};
static inline int get_cacheinfo_idx(enum cache_type type)
{
if (type == CACHE_TYPE_UNIFIED)
return 0;
return type;
}
static void cache_size(struct cacheinfo *this_leaf)
{
const char *propname;
const __be32 *cache_size;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
cache_size = of_get_property(this_leaf->of_node, propname, NULL);
if (cache_size)
this_leaf->size = of_read_number(cache_size, 1);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
const __be32 *line_size;
int i, lim, ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
for (i = 0; i < lim; i++) {
const char *propname;
propname = cache_type_info[ct_idx].line_size_props[i];
line_size = of_get_property(this_leaf->of_node, propname, NULL);
if (line_size)
break;
}
if (line_size)
this_leaf->coherency_line_size = of_read_number(line_size, 1);
}
static void cache_nr_sets(struct cacheinfo *this_leaf)
{
const char *propname;
const __be32 *nr_sets;
int ct_idx;
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
if (nr_sets)
this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
static void cache_associativity(struct cacheinfo *this_leaf)
{
unsigned int line_size = this_leaf->coherency_line_size;
unsigned int nr_sets = this_leaf->number_of_sets;
unsigned int size = this_leaf->size;
/*
* If the cache is fully associative, there is no need to
* check the other properties.
*/
if (nr_sets > 1 && size > 0 && line_size > 0)
this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}
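A worked instance of the computation in cache_associativity(), with hypothetical property values:

/* Hypothetical DT values: 32 KiB cache, 64-byte lines, 128 sets. */
unsigned int size = 32768;	/* "cache-size" */
unsigned int nr_sets = 128;	/* "cache-sets" */
unsigned int line_size = 64;	/* "cache-line-size" */
unsigned int ways = (size / nr_sets) / line_size;	/* (32768 / 128) / 64 = 4-way */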
static void cache_of_override_properties(unsigned int cpu)
{
int index;
struct cacheinfo *this_leaf;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = this_cpu_ci->info_list + index;
cache_size(this_leaf);
cache_get_line_size(this_leaf);
cache_nr_sets(this_leaf);
cache_associativity(this_leaf);
}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
int ret;
int ret = 0;
ret = cache_setup_of_node(cpu);
if (this_cpu_ci->cpu_map_populated)
return 0;
if (of_have_populated_dt())
ret = cache_setup_of_node(cpu);
else if (!acpi_disabled)
/* No cache property/hierarchy support yet in ACPI */
ret = -ENOTSUPP;
if (ret)
return ret;
@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
static void cache_override_properties(unsigned int cpu)
{
if (of_have_populated_dt())
return cache_of_override_properties(cpu);
}
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
cpu);
pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
cache_override_properties(cpu);
return 0;
free_ci:

View File

@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
/* Device links support. */
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
static inline void device_links_write_lock(void)
{
mutex_lock(&device_links_lock);
}
static inline void device_links_write_unlock(void)
{
mutex_unlock(&device_links_lock);
}
int device_links_read_lock(void)
{
return srcu_read_lock(&device_links_srcu);
}
void device_links_read_unlock(int idx)
{
srcu_read_unlock(&device_links_srcu, idx);
}
#else /* !CONFIG_SRCU */
static DECLARE_RWSEM(device_links_lock);
static inline void device_links_write_lock(void)
{
down_write(&device_links_lock);
}
static inline void device_links_write_unlock(void)
{
up_write(&device_links_lock);
}
int device_links_read_lock(void)
{
down_read(&device_links_lock);
return 0;
}
void device_links_read_unlock(int not_used)
{
up_read(&device_links_lock);
}
#endif /* !CONFIG_SRCU */
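Both variants give callers the same read-side usage pattern; a sketch (dpm_wait_for_suppliers() later in this commit follows exactly this shape):

struct device_link *link;
int idx;

idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
	/* Inspect link->status, link->supplier, ... under the read lock. */
}
device_links_read_unlock(idx);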
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
* @target: Device to check against.
*
* Check if @target depends on @dev or any device dependent on it (its child or
* its consumer etc). Return 1 if that is the case or 0 otherwise.
*/
static int device_is_dependent(struct device *dev, void *target)
{
struct device_link *link;
int ret;
if (WARN_ON(dev == target))
return 1;
ret = device_for_each_child(dev, target, device_is_dependent);
if (ret)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (WARN_ON(link->consumer == target))
return 1;
ret = device_is_dependent(link->consumer, target);
if (ret)
break;
}
return ret;
}
static int device_reorder_to_tail(struct device *dev, void *not_used)
{
struct device_link *link;
/*
* Devices that have not been registered yet will be put to the ends
* of the lists during the registration, so skip them here.
*/
if (device_is_registered(dev))
devices_kset_move_last(dev);
if (device_pm_initialized(dev))
device_pm_move_last(dev);
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node)
device_reorder_to_tail(link->consumer, NULL);
return 0;
}
/**
* device_link_add - Create a link between two devices.
* @consumer: Consumer end of the link.
* @supplier: Supplier end of the link.
* @flags: Link flags.
*
* The caller is responsible for the proper synchronization of the link creation
* with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
* runtime PM framework to take the link into account. Second, if the
* DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
* be forced into the active metastate and reference-counted upon the creation
* of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
* ignored.
*
* If DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
* when the consumer device driver unbinds from it. The combination of both
* DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
* to be returned.
*
* A side effect of the link creation is re-ordering of dpm_list and the
* devices_kset list by moving the consumer device and all devices depending
* on it to the ends of these lists (that does not happen to devices that have
* not been registered when this function is called).
*
* The supplier device is required to be registered when this function is called
* and NULL will be returned if that is not the case. The consumer device need
* not be registered, however.
*/
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags)
{
struct device_link *link;
if (!consumer || !supplier ||
((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
return NULL;
device_links_write_lock();
device_pm_lock();
/*
* If the supplier has not been fully registered yet or there is a
* reverse dependency between the consumer and the supplier already in
* the graph, return NULL.
*/
if (!device_pm_initialized(supplier)
|| device_is_dependent(consumer, supplier)) {
link = NULL;
goto out;
}
list_for_each_entry(link, &supplier->links.consumers, s_node)
if (link->consumer == consumer)
goto out;
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
goto out;
if (flags & DL_FLAG_PM_RUNTIME) {
if (flags & DL_FLAG_RPM_ACTIVE) {
if (pm_runtime_get_sync(supplier) < 0) {
pm_runtime_put_noidle(supplier);
kfree(link);
link = NULL;
goto out;
}
link->rpm_active = true;
}
pm_runtime_new_link(consumer);
}
get_device(supplier);
link->supplier = supplier;
INIT_LIST_HEAD(&link->s_node);
get_device(consumer);
link->consumer = consumer;
INIT_LIST_HEAD(&link->c_node);
link->flags = flags;
/* Determine the initial link state. */
if (flags & DL_FLAG_STATELESS) {
link->status = DL_STATE_NONE;
} else {
switch (supplier->links.status) {
case DL_DEV_DRIVER_BOUND:
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
* Balance the decrement of the supplier's
* runtime PM usage counter after consumer probe
* in driver_probe_device().
*/
if (flags & DL_FLAG_PM_RUNTIME)
pm_runtime_get_sync(supplier);
link->status = DL_STATE_CONSUMER_PROBE;
break;
case DL_DEV_DRIVER_BOUND:
link->status = DL_STATE_ACTIVE;
break;
default:
link->status = DL_STATE_AVAILABLE;
break;
}
break;
case DL_DEV_UNBINDING:
link->status = DL_STATE_SUPPLIER_UNBIND;
break;
default:
link->status = DL_STATE_DORMANT;
break;
}
}
/*
* Move the consumer and all of the devices depending on it to the end
* of dpm_list and the devices_kset list.
*
* It is necessary to hold dpm_list locked throughout all that, or else
* we may end up suspending devices in the wrong order.
*/
device_reorder_to_tail(consumer, NULL);
list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
out:
device_pm_unlock();
device_links_write_unlock();
return link;
}
EXPORT_SYMBOL_GPL(device_link_add);
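A sketch of how a consumer driver's probe path might use this; the supplier lookup and the error policy are assumptions, not something this commit prescribes:

/*
 * In the consumer's probe; supplier_dev is assumed to have been
 * resolved already, e.g. from firmware data.
 */
struct device_link *link;

link = device_link_add(dev, supplier_dev,
		       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
if (!link)
	return -ENODEV;	/* supplier unregistered or a dependency cycle */

/*
 * With DL_FLAG_AUTOREMOVE the core drops the link on consumer unbind;
 * without it the driver would call device_link_del(link) itself.
 */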
static void device_link_free(struct device_link *link)
{
put_device(link->consumer);
put_device(link->supplier);
kfree(link);
}
#ifdef CONFIG_SRCU
static void __device_link_free_srcu(struct rcu_head *rhead)
{
device_link_free(container_of(rhead, struct device_link, rcu_head));
}
static void __device_link_del(struct device_link *link)
{
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_drop_link(link->consumer);
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
}
#else /* !CONFIG_SRCU */
static void __device_link_del(struct device_link *link)
{
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
list_del(&link->s_node);
list_del(&link->c_node);
device_link_free(link);
}
#endif /* !CONFIG_SRCU */
/**
* device_link_del - Delete a link between two devices.
* @link: Device link to delete.
*
* The caller must ensure proper synchronization of this function with runtime
* PM.
*/
void device_link_del(struct device_link *link)
{
device_links_write_lock();
device_pm_lock();
__device_link_del(link);
device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
static void device_links_missing_supplier(struct device *dev)
{
struct device_link *link;
list_for_each_entry(link, &dev->links.suppliers, c_node)
if (link->status == DL_STATE_CONSUMER_PROBE)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
*
* Check links from this device to any suppliers. Walk the list of the device's
* links to suppliers and see if all of them are available. If not, simply
* return -EPROBE_DEFER.
*
* We need to guarantee that the supplier will not go away after the check has
* been positive here. It can only go away in __device_release_driver(), and
* that function checks the device's links to consumers. This means we need to
* mark the link as "consumer probe in progress" to make the supplier removal
* wait for us to complete (or bad things may happen).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
int ret = 0;
device_links_write_lock();
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->status != DL_STATE_AVAILABLE) {
device_links_missing_supplier(dev);
ret = -EPROBE_DEFER;
break;
}
WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
}
dev->links.status = DL_DEV_PROBING;
device_links_write_unlock();
return ret;
}
/**
* device_links_driver_bound - Update device links after probing its driver.
* @dev: Device to update the links for.
*
* The probe has been successful, so update links from this device to any
* consumers by changing their status to "available".
*
* Also change the status of @dev's links to suppliers to "active".
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_driver_bound(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->status != DL_STATE_DORMANT);
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
WRITE_ONCE(link->status, DL_STATE_ACTIVE);
}
dev->links.status = DL_DEV_DRIVER_BOUND;
device_links_write_unlock();
}
/**
* __device_links_no_driver - Update links of a device without a driver.
* @dev: Device without a driver.
*
* Delete all non-persistent links from this device to any suppliers.
*
* Persistent links stay around, but their status is changed to "available",
* unless they already are in the "supplier unbind in progress" state in which
* case they need not be updated.
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
static void __device_links_no_driver(struct device *dev)
{
struct device_link *link, *ln;
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->flags & DL_FLAG_AUTOREMOVE)
__device_link_del(link);
else if (link->status != DL_STATE_SUPPLIER_UNBIND)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
dev->links.status = DL_DEV_NO_DRIVER;
}
void device_links_no_driver(struct device *dev)
{
device_links_write_lock();
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_driver_cleanup - Update links after driver removal.
* @dev: Device whose driver has just gone away.
*
* Update links to consumers for @dev by changing their status to "dormant" and
* invoke __device_links_no_driver() to update links to suppliers for it as
* appropriate.
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_driver_cleanup(struct device *dev)
{
struct device_link *link;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
__device_links_no_driver(dev);
device_links_write_unlock();
}
/**
* device_links_busy - Check if there are any busy links to consumers.
* @dev: Device to check.
*
* Check each consumer of the device and return 'true' if its link's status
* is one of "consumer probe" or "active" (meaning that the given consumer is
* probing right now or its driver is present). Otherwise, change the link
* state to "supplier unbind" to prevent the consumer from being probed
* successfully going forward.
*
* Return 'false' if there are no probing or active consumers.
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
bool device_links_busy(struct device *dev)
{
struct device_link *link;
bool ret = false;
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->status == DL_STATE_CONSUMER_PROBE
|| link->status == DL_STATE_ACTIVE) {
ret = true;
break;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
}
dev->links.status = DL_DEV_UNBINDING;
device_links_write_unlock();
return ret;
}
/**
* device_links_unbind_consumers - Force unbind consumers of the given device.
* @dev: Device to unbind the consumers of.
*
* Walk the list of links to consumers for @dev and if any of them is in the
* "consumer probe" state, wait for all device probes in progress to complete
* and start over.
*
* If that's not the case, change the status of the link to "supplier unbind"
* and check if the link was in the "active" state. If so, force the consumer
* driver to unbind and start over (the consumer will not re-probe as we have
* changed the state of the link already).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*/
void device_links_unbind_consumers(struct device *dev)
{
struct device_link *link;
start:
device_links_write_lock();
list_for_each_entry(link, &dev->links.consumers, s_node) {
enum device_link_state status;
if (link->flags & DL_FLAG_STATELESS)
continue;
status = link->status;
if (status == DL_STATE_CONSUMER_PROBE) {
device_links_write_unlock();
wait_for_device_probe();
goto start;
}
WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
if (status == DL_STATE_ACTIVE) {
struct device *consumer = link->consumer;
get_device(consumer);
device_links_write_unlock();
device_release_driver_internal(consumer, NULL,
consumer->parent);
put_device(consumer);
goto start;
}
}
device_links_write_unlock();
}
/**
* device_links_purge - Delete existing links to other devices.
* @dev: Target device.
*/
static void device_links_purge(struct device *dev)
{
struct device_link *link, *ln;
/*
* Delete all of the remaining links from this device to any other
* devices (either consumers or suppliers).
*/
device_links_write_lock();
list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
WARN_ON(link->status == DL_STATE_ACTIVE);
__device_link_del(link);
}
list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
WARN_ON(link->status != DL_STATE_DORMANT &&
link->status != DL_STATE_NONE);
__device_link_del(link);
}
device_links_write_unlock();
}
/* Device links support end. */
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
error = device_create_file(dev, &dev_attr_deferred_probe);
if (error)
goto err_remove_online;
return 0;
err_remove_online:
device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);

View File

@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static ssize_t deferred_probe_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
bool value;
mutex_lock(&deferred_probe_mutex);
value = !list_empty(&dev->p->deferred_probe);
mutex_unlock(&deferred_probe_mutex);
return sprintf(buf, "%d\n", value);
}
DEVICE_ATTR_RO(deferred_probe);
/*
* In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@ -337,6 +351,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
ret = device_links_check_suppliers(dev);
if (ret)
return ret;
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@ -415,6 +433,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@ -507,6 +526,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@ -517,6 +537,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
pm_runtime_put_suppliers(dev);
return ret;
}
@ -771,7 +792,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
static void __device_release_driver(struct device *dev)
static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@ -780,7 +801,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
while (device_links_busy(dev)) {
device_unlock(dev);
if (parent)
device_unlock(parent);
device_links_unbind_consumers(dev);
if (parent)
device_lock(parent);
device_lock(dev);
/*
* A concurrent invocation of the same function might
* have released the driver successfully while this one
* was waiting, so check for that.
*/
if (dev->driver != drv)
return;
}
pm_runtime_get_sync(dev);
pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@ -795,6 +836,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@ -811,12 +854,32 @@ static void __device_release_driver(struct device *dev)
}
}
void device_release_driver_internal(struct device *dev,
struct device_driver *drv,
struct device *parent)
{
if (parent)
device_lock(parent);
device_lock(dev);
if (!drv || drv == dev->driver)
__device_release_driver(dev, parent);
device_unlock(dev);
if (parent)
device_unlock(parent);
}
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
*
* If this function is to be called with @dev->parent lock held, ensure that
* the device's consumers are unbound in advance or that their locks can be
* acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@ -825,9 +888,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
device_lock(dev);
__device_release_driver(dev);
device_unlock(dev);
device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@ -852,15 +913,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
if (dev->parent) /* Needed for USB */
device_lock(dev->parent);
device_lock(dev);
if (dev->driver == drv)
__device_release_driver(dev);
device_unlock(dev);
if (dev->parent)
device_unlock(dev->parent);
device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}

View File

@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* If the supplier goes away right after we've checked the link to it,
* we'll wait for its completion to change the state, but that's fine,
* because the only things that will block as a result are the SRCU
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
}
static void dpm_wait_for_superior(struct device *dev, bool async)
{
dpm_wait(dev->parent, async);
dpm_wait_for_suppliers(dev, async);
}
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
/*
* The status of a device link can only be changed from "dormant" by a
* probe, but that cannot happen during system suspend/resume. In
* theory it can change to "dormant" at that time, but then it is
* reasonable to wait for the target device anyway (e.g. if it goes
* away, it's better to wait for it to go away completely and then
* continue instead of trying to continue in parallel with its
* unregistration).
*/
list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
}
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
dpm_wait_for_children(dev, async);
dpm_wait_for_consumers(dev, async);
}
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
dpm_wait(dev->parent, async);
dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
dpm_wait(dev->parent, async);
dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
dpm_wait(dev->parent, async);
dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@ -1038,7 +1096,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
dpm_wait_for_children(dev, async);
dpm_wait_for_subordinate(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@ -1185,7 +1243,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
dpm_wait_for_children(dev, async);
dpm_wait_for_subordinate(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
}
device_links_read_unlock(idx);
}
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
dpm_wait_for_children(dev, async);
dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@ -1454,6 +1528,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);

View File

@ -127,6 +127,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
static inline bool device_pm_initialized(struct device *dev)
{
return dev->power.in_dpm_list;
}
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@ -146,6 +151,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
static inline bool device_pm_initialized(struct device *dev)
{
return device_is_registered(dev);
}
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)

View File

@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@ -258,6 +260,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
static int rpm_get_suppliers(struct device *dev)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
int retval;
if (!(link->flags & DL_FLAG_PM_RUNTIME))
continue;
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
link->rpm_active)
continue;
retval = pm_runtime_get_sync(link->supplier);
if (retval < 0) {
pm_runtime_put_noidle(link->supplier);
return retval;
}
link->rpm_active = true;
}
return 0;
}
static void rpm_put_suppliers(struct device *dev)
{
struct device_link *link;
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->rpm_active &&
READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
pm_runtime_put(link->supplier);
link->rpm_active = false;
}
}
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@ -266,19 +304,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
int retval, idx;
bool use_links = dev->power.links_count > 0;
if (dev->power.irq_safe)
if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
else
} else {
spin_unlock_irq(&dev->power.lock);
/*
* Resume suppliers if necessary.
*
* The device's runtime PM status cannot change until this
* routine returns, so it is safe to read the status outside of
* the lock.
*/
if (use_links && dev->power.runtime_status == RPM_RESUMING) {
idx = device_links_read_lock();
retval = rpm_get_suppliers(dev);
if (retval)
goto fail;
device_links_read_unlock(idx);
}
}
retval = cb(dev);
if (dev->power.irq_safe)
if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
else
} else {
/*
* If the device is suspending and the callback has returned
* success, drop the usage counters of the suppliers that have
* been reference counted on its resume.
*
* Do that if resume fails too.
*/
if (use_links
&& ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
|| (dev->power.runtime_status == RPM_RESUMING && retval))) {
idx = device_links_read_lock();
fail:
rpm_put_suppliers(dev);
device_links_read_unlock(idx);
}
spin_lock_irq(&dev->power.lock);
}
return retval;
}
@ -1446,6 +1522,94 @@ void pm_runtime_remove(struct device *dev)
pm_runtime_reinit(dev);
}
/**
* pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
* @dev: Device whose driver is going to be removed.
*
* Check links from this device to any consumers and if any of them have active
* runtime PM references to the device, drop the usage counter of the device
* (once per link).
*
* Links with the DL_FLAG_STATELESS flag set are ignored.
*
* Since the device is guaranteed to be runtime-active at the point this is
* called, nothing else needs to be done here.
*
* Moreover, this is called after device_links_busy() has returned 'false', so
* the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
* therefore rpm_active can't be manipulated concurrently.
*/
void pm_runtime_clean_up_links(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
if (link->rpm_active) {
pm_runtime_put_noidle(dev);
link->rpm_active = false;
}
}
device_links_read_unlock(idx);
}
/**
* pm_runtime_get_suppliers - Resume and reference-count supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_get_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_get_sync(link->supplier);
device_links_read_unlock(idx);
}
/**
* pm_runtime_put_suppliers - Drop references to supplier devices.
* @dev: Consumer device.
*/
void pm_runtime_put_suppliers(struct device *dev)
{
struct device_link *link;
int idx;
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
if (link->flags & DL_FLAG_PM_RUNTIME)
pm_runtime_put(link->supplier);
device_links_read_unlock(idx);
}
void pm_runtime_new_link(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.links_count++;
spin_unlock_irq(&dev->power.lock);
}
void pm_runtime_drop_link(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
WARN_ON(dev->power.links_count == 0);
dev->power.links_count--;
spin_unlock_irq(&dev->power.lock);
}
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.

View File

@ -0,0 +1,9 @@
config TEST_ASYNC_DRIVER_PROBE
tristate "Build kernel module to test asynchronous driver probing"
depends on m
help
Enabling this option produces a kernel module that allows
testing asynchronous driver probing by the device core.
The module name will be test_async_driver_probe.ko.
If unsure, say N.

View File

@ -0,0 +1 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o

View File

@ -0,0 +1,171 @@
/*
* Copyright (C) 2014 Google, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/time.h>
#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
static int test_probe(struct platform_device *pdev)
{
dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
TEST_PROBE_DELAY);
msleep(TEST_PROBE_DELAY);
dev_info(&pdev->dev, "done sleeping\n");
return 0;
}
static struct platform_driver async_driver = {
.driver = {
.name = "test_async_driver",
.owner = THIS_MODULE,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = test_probe,
};
static struct platform_driver sync_driver = {
.driver = {
.name = "test_sync_driver",
.owner = THIS_MODULE,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
},
.probe = test_probe,
};
static struct platform_device *async_dev_1, *async_dev_2;
static struct platform_device *sync_dev_1;
static int __init test_async_probe_init(void)
{
ktime_t calltime, delta;
unsigned long long duration;
int error;
pr_info("registering first asynchronous device...\n");
async_dev_1 = platform_device_register_simple("test_async_driver", 1,
NULL, 0);
if (IS_ERR(async_dev_1)) {
error = PTR_ERR(async_dev_1);
pr_err("failed to create async_dev_1: %d", error);
return error;
}
pr_info("registering asynchronous driver...\n");
calltime = ktime_get();
error = platform_driver_register(&async_driver);
if (error) {
pr_err("Failed to register async_driver: %d\n", error);
goto err_unregister_async_dev_1;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
error = -ETIMEDOUT;
goto err_unregister_async_driver;
}
pr_info("registering second asynchronous device...\n");
calltime = ktime_get();
async_dev_2 = platform_device_register_simple("test_async_driver", 2,
NULL, 0);
if (IS_ERR(async_dev_2)) {
error = PTR_ERR(async_dev_2);
pr_err("failed to create async_dev_2: %d", error);
goto err_unregister_async_driver;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
error = -ETIMEDOUT;
goto err_unregister_async_dev_2;
}
pr_info("registering synchronous driver...\n");
error = platform_driver_register(&sync_driver);
if (error) {
pr_err("Failed to register async_driver: %d\n", error);
goto err_unregister_async_dev_2;
}
pr_info("registering synchronous device...\n");
calltime = ktime_get();
sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
NULL, 0);
if (IS_ERR(sync_dev_1)) {
error = PTR_ERR(sync_dev_1);
pr_err("failed to create sync_dev_1: %d", error);
goto err_unregister_sync_driver;
}
delta = ktime_sub(ktime_get(), calltime);
duration = (unsigned long long) ktime_to_ms(delta);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe was too quick\n");
error = -ETIMEDOUT;
goto err_unregister_sync_dev_1;
}
pr_info("completed successfully");
return 0;
err_unregister_sync_dev_1:
platform_device_unregister(sync_dev_1);
err_unregister_sync_driver:
platform_driver_unregister(&sync_driver);
err_unregister_async_dev_2:
platform_device_unregister(async_dev_2);
err_unregister_async_driver:
platform_driver_unregister(&async_driver);
err_unregister_async_dev_1:
platform_device_unregister(async_dev_1);
return error;
}
module_init(test_async_probe_init);
static void __exit test_async_probe_exit(void)
{
platform_driver_unregister(&async_driver);
platform_driver_unregister(&sync_driver);
platform_device_unregister(async_dev_1);
platform_device_unregister(async_dev_2);
platform_device_unregister(sync_dev_1);
}
module_exit(test_async_probe_exit);
MODULE_DESCRIPTION("Test module for asynchronous driver probing");
MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
MODULE_LICENSE("GPL");

View File

@ -71,6 +71,7 @@ struct cpu_cacheinfo {
struct cacheinfo *info_list;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
};
/*

View File

@ -62,6 +62,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp)
return filp->f_path.dentry->d_fsdata;
}
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
.owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
.llseek = generic_file_llseek, \
}
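A hypothetical use of the macro (all names here are illustrative): it expands to a file_operations routed through debugfs_attr_read()/debugfs_attr_write() around simple get/set callbacks for a u64:

static u64 threshold;

static int threshold_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int threshold_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_threshold, threshold_get, threshold_set, "%llu\n");

/*
 * Registered later with, e.g.:
 * debugfs_create_file("threshold", 0644, parent, &threshold, &fops_threshold);
 */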
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
@ -99,21 +114,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
return simple_attr_open(inode, file, __get, __set, __fmt); \
} \
static const struct file_operations __fops = { \
.owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
.llseek = generic_file_llseek, \
}
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@ -233,8 +233,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
__releases(&debugfs_srcu)
{ }
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
static const struct file_operations __fops = { 0 }
static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
return -ENODEV;
}
static inline ssize_t debugfs_attr_write(struct file *file,
const char __user *buf,
size_t len, loff_t *ppos)
{
return -ENODEV;
}
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)

View File

@ -707,6 +707,87 @@ struct device_dma_parameters {
unsigned long segment_boundary_mask;
};
/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
* @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
* @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
* @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
* @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
*/
enum device_link_state {
DL_STATE_NONE = -1,
DL_STATE_DORMANT = 0,
DL_STATE_AVAILABLE,
DL_STATE_CONSUMER_PROBE,
DL_STATE_ACTIVE,
DL_STATE_SUPPLIER_UNBIND,
};
/*
* Device link flags.
*
* STATELESS: The core won't track the presence of supplier/consumer drivers.
* AUTOREMOVE: Remove this link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE BIT(1)
#define DL_FLAG_PM_RUNTIME BIT(2)
#define DL_FLAG_RPM_ACTIVE BIT(3)
/**
* struct device_link - Device link representation.
* @supplier: The device on the supplier end of the link.
* @s_node: Hook to the supplier device's list of links to consumers.
* @consumer: The device on the consumer end of the link.
* @c_node: Hook to the consumer device's list of links to suppliers.
* @status: The state of the link (with respect to the presence of drivers).
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
*/
struct device_link {
struct device *supplier;
struct list_head s_node;
struct device *consumer;
struct list_head c_node;
enum device_link_state status;
u32 flags;
bool rpm_active;
#ifdef CONFIG_SRCU
struct rcu_head rcu_head;
#endif
};
/**
* enum dl_dev_state - Device driver presence tracking information.
* @DL_DEV_NO_DRIVER: There is no driver attached to the device.
* @DL_DEV_PROBING: A driver is probing.
* @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
* @DL_DEV_UNBINDING: The driver is unbinding from the device.
*/
enum dl_dev_state {
DL_DEV_NO_DRIVER = 0,
DL_DEV_PROBING,
DL_DEV_DRIVER_BOUND,
DL_DEV_UNBINDING,
};
/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
enum dl_dev_state status;
};
/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
@ -799,6 +880,7 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@ -1116,6 +1198,10 @@ extern void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
/* Device links interface. */
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
#ifdef CONFIG_PRINTK

View File

@ -559,6 +559,7 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
@ -596,6 +597,7 @@ struct dev_pm_info {
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;

View File

@ -55,6 +55,11 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_clean_up_links(struct device *dev);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
@ -186,6 +191,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
static inline void pm_runtime_clean_up_links(struct device *dev) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */

View File

@ -56,7 +56,7 @@ static const char *kobject_actions[] = {
* kobject_action_type - translate action string to numeric type
*
* @buf: buffer containing the action string, newline is ignored
* @len: length of buffer
* @count: length of buffer
* @type: pointer to the location to store the action type
*
* Returns 0 if the action string was recognized.
@ -154,8 +154,8 @@ static void cleanup_uevent_env(struct subprocess_info *info)
/**
* kobject_uevent_env - send an uevent with environmental data
*
* @action: action that is happening
* @kobj: struct kobject that the action is happening to
* @action: action that is happening
* @envp_ext: pointer to environmental data
*
* Returns 0 if kobject_uevent_env() is completed with success or the
@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
* kobject_uevent - notify userspace by sending an uevent
*
* @action: action that is happening
* @kobj: struct kobject that the action is happening to
* @action: action that is happening
*
* Returns 0 if kobject_uevent() is completed with success or the
* corresponding error when it fails.