
regulator: core: Optimise enable/disable path for always on regulators

If a regulator is always on for any reason then cache that when the
consumer is created, and use it to optimise away the need to take locks
or recurse up the supply tree when consumers make enable or disable
calls. The scheduling of asynchronous work for bulk enables is also
skipped.
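
As a purely illustrative sketch of the consumer-side effect (the driver,
probe flow and "vdd" supply name below are hypothetical and not part of
this patch), an enable call on a supply whose constraints forbid status
changes and which is already powered now returns straight away, without
taking rdev->mutex or walking the supply tree:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *vdd;
	int ret;

	/* always_on is cached for this consumer when it is created here */
	vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/* Fast path after this patch: returns 0 immediately when the
	 * consumer was cached as always on. */
	ret = regulator_enable(vdd);
	if (ret != 0)
		return ret;

	return 0;
}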

We don't actually check whether the device physically supports control,
on the basis that constraints allowing status changes on physically
always-on regulators would be nonsensical anyway.
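
Concretely, the check added in create_regulator() (quoted from the hunk
below, with explanatory comments added) derives the flag from the
constraints and the current enable state only:

	/* Under rdev->mutex in create_regulator(): _regulator_can_change_status()
	 * consults only constraints->valid_ops_mask (REGULATOR_CHANGE_STATUS),
	 * never the hardware's physical ability to switch off, and the
	 * regulator must currently be enabled. */
	if (!_regulator_can_change_status(rdev) &&
	    _regulator_is_enabled(rdev))
		regulator->always_on = true;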

This is a very common pattern in hardware: it's normal to have some
power supplies that either have no software control or are critical to
system function, so many systems should be able to benefit.

Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Acked-by: Liam Girdwood <lrg@ti.com>
Mark Brown 2012-04-19 13:19:07 +01:00
parent 854ccbaee7
commit 6492bc1b1a
1 changed file with 40 additions and 14 deletions


@@ -74,6 +74,7 @@ struct regulator_map {
 struct regulator {
 	struct device *dev;
 	struct list_head list;
+	unsigned int always_on:1;
 	int uA_load;
 	int min_uV;
 	int max_uV;
@@ -155,6 +156,17 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp
 	return regnode;
 }
 
+static int _regulator_can_change_status(struct regulator_dev *rdev)
+{
+	if (!rdev->constraints)
+		return 0;
+
+	if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
+		return 1;
+	else
+		return 0;
+}
+
 /* Platform voltage constraint check */
 static int regulator_check_voltage(struct regulator_dev *rdev,
 				   int *min_uV, int *max_uV)
@@ -1141,6 +1153,15 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
 				   &regulator->max_uV);
 	}
 
+	/*
+	 * Check now if the regulator is an always on regulator - if
+	 * it is then we don't need to do nearly so much work for
+	 * enable/disable calls.
+	 */
+	if (!_regulator_can_change_status(rdev) &&
+	    _regulator_is_enabled(rdev))
+		regulator->always_on = true;
+
 	mutex_unlock(&rdev->mutex);
 	return regulator;
 link_name_err:
@@ -1443,17 +1464,6 @@ void devm_regulator_put(struct regulator *regulator)
 }
 EXPORT_SYMBOL_GPL(devm_regulator_put);
 
-static int _regulator_can_change_status(struct regulator_dev *rdev)
-{
-	if (!rdev->constraints)
-		return 0;
-
-	if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
-		return 1;
-	else
-		return 0;
-}
-
 /* locks held by regulator_enable() */
 static int _regulator_enable(struct regulator_dev *rdev)
 {
@@ -1533,6 +1543,9 @@ int regulator_enable(struct regulator *regulator)
 	struct regulator_dev *rdev = regulator->rdev;
 	int ret = 0;
 
+	if (regulator->always_on)
+		return 0;
+
 	if (rdev->supply) {
 		ret = regulator_enable(rdev->supply);
 		if (ret != 0)
@@ -1611,6 +1624,9 @@ int regulator_disable(struct regulator *regulator)
 	struct regulator_dev *rdev = regulator->rdev;
 	int ret = 0;
 
+	if (regulator->always_on)
+		return 0;
+
 	mutex_lock(&rdev->mutex);
 	ret = _regulator_disable(rdev);
 	mutex_unlock(&rdev->mutex);
@@ -1719,6 +1735,9 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
 	struct regulator_dev *rdev = regulator->rdev;
 	int ret;
 
+	if (regulator->always_on)
+		return 0;
+
 	mutex_lock(&rdev->mutex);
 	rdev->deferred_disables++;
 	mutex_unlock(&rdev->mutex);
@@ -1757,6 +1776,9 @@ int regulator_is_enabled(struct regulator *regulator)
 {
 	int ret;
 
+	if (regulator->always_on)
+		return 1;
+
 	mutex_lock(&regulator->rdev->mutex);
 	ret = _regulator_is_enabled(regulator->rdev);
 	mutex_unlock(&regulator->rdev->mutex);
@@ -2539,9 +2561,13 @@ int regulator_bulk_enable(int num_consumers,
 	int i;
 	int ret = 0;
 
-	for (i = 0; i < num_consumers; i++)
-		async_schedule_domain(regulator_bulk_enable_async,
-				      &consumers[i], &async_domain);
+	for (i = 0; i < num_consumers; i++) {
+		if (consumers[i].consumer->always_on)
+			consumers[i].ret = 0;
+		else
+			async_schedule_domain(regulator_bulk_enable_async,
+					      &consumers[i], &async_domain);
+	}
 
 	async_synchronize_full_domain(&async_domain);