Merge branch 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm

Pull more operating performance points (OPP) framework changes for v4.21
from Viresh Kumar:

"- Fix missing OPP debugfs directory (Viresh Kumar).

 - Make genpd performance states orthogonal to idlestates (Ulf
   Hansson).

 - Propagate performance state changes from genpd to its master (Viresh
   Kumar).

 - Minor improvement of some OPP helpers (Viresh Kumar)."

* 'opp/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  PM / Domains: Propagate performance state updates
  PM / Domains: Factorize dev_pm_genpd_set_performance_state()
  PM / Domains: Save OPP table pointer in genpd
  OPP: Don't return 0 on error from of_get_required_opp_performance_state()
  OPP: Add dev_pm_opp_xlate_performance_state() helper
  OPP: Improve _find_table_of_opp_np()
  PM / Domains: Make genpd performance states orthogonal to the idlestates
  OPP: Fix missing debugfs supply directory for OPPs
  OPP: Use opp_table->regulators to verify no regulator case
Rafael J. Wysocki 2018-12-14 12:53:34 +01:00
commit bcbeef5f00
6 changed files with 278 additions and 79 deletions


@@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
unsigned int state)
{
struct generic_pm_domain_data *pd_data;
struct pm_domain_data *pdd;
struct gpd_link *link;
/* New requested state is same as Max requested state */
if (state == genpd->performance_state)
return state;
/* New requested state is higher than Max requested state */
if (state > genpd->performance_state)
return state;
/* Traverse all devices within the domain */
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
pd_data = to_gpd_data(pdd);
if (pd_data->performance_state > state)
state = pd_data->performance_state;
}
/*
* Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state
* field is protected by the master genpd->lock, which is already taken.
*
* Also note that link->performance_state (subdomain's performance state
* requirement to master domain) is different from
* link->slave->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a
* different value.
*
* Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now.
*/
list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->performance_state > state)
state = link->performance_state;
}
return state;
}
static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth)
{
struct generic_pm_domain *master;
struct gpd_link *link;
int master_state, ret;
if (state == genpd->performance_state)
return 0;
/* Propagate to masters of genpd */
list_for_each_entry(link, &genpd->slave_links, slave_node) {
master = link->master;
if (!master->set_performance_state)
continue;
/* Find master's performance state */
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
master->opp_table,
state);
if (unlikely(ret < 0))
goto err;
master_state = ret;
genpd_lock_nested(master, depth + 1);
link->prev_performance_state = link->performance_state;
link->performance_state = master_state;
master_state = _genpd_reeval_performance_state(master,
master_state);
ret = _genpd_set_performance_state(master, master_state, depth + 1);
if (ret)
link->performance_state = link->prev_performance_state;
genpd_unlock(master);
if (ret)
goto err;
}
ret = genpd->set_performance_state(genpd, state);
if (ret)
goto err;
genpd->performance_state = state;
return 0;
err:
/* Encountered an error, lets rollback */
list_for_each_entry_continue_reverse(link, &genpd->slave_links,
slave_node) {
master = link->master;
if (!master->set_performance_state)
continue;
genpd_lock_nested(master, depth + 1);
master_state = link->prev_performance_state;
link->performance_state = master_state;
master_state = _genpd_reeval_performance_state(master,
master_state);
if (_genpd_set_performance_state(master, master_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n",
master->name, master_state);
}
genpd_unlock(master);
}
return ret;
}
/**
* dev_pm_genpd_set_performance_state - Set performance state of device's power
* domain.
@@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
struct generic_pm_domain_data *gpd_data, *pd_data;
struct pm_domain_data *pdd;
struct generic_pm_domain_data *gpd_data;
unsigned int prev;
int ret = 0;
int ret;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
@@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
prev = gpd_data->performance_state;
gpd_data->performance_state = state;
/* New requested state is same as Max requested state */
if (state == genpd->performance_state)
goto unlock;
state = _genpd_reeval_performance_state(genpd, state);
ret = _genpd_set_performance_state(genpd, state, 0);
if (ret)
gpd_data->performance_state = prev;
/* New requested state is higher than Max requested state */
if (state > genpd->performance_state)
goto update_state;
/* Traverse all devices within the domain */
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
pd_data = to_gpd_data(pdd);
if (pd_data->performance_state > state)
state = pd_data->performance_state;
}
if (state == genpd->performance_state)
goto unlock;
/*
* We aren't propagating performance state changes of a subdomain to its
* masters as we don't have hardware that needs it. Over that, the
* performance states of subdomain and its masters may not have
* one-to-one mapping and would require additional information. We can
* get back to this once we have hardware that needs it. For that
* reason, we don't have to consider performance state of the subdomains
* of genpd here.
*/
update_state:
if (genpd_status_on(genpd)) {
ret = genpd->set_performance_state(genpd, state);
if (ret) {
gpd_data->performance_state = prev;
goto unlock;
}
}
genpd->performance_state = state;
unlock:
genpd_unlock(genpd);
return ret;
@@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (unlikely(genpd->set_performance_state)) {
ret = genpd->set_performance_state(genpd, genpd->performance_state);
if (ret) {
pr_warn("%s: Failed to set performance state %d (%d)\n",
genpd->name, genpd->performance_state, ret);
}
}
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np,
ret);
goto unlock;
}
/*
* Save table for faster processing while setting performance
* state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
WARN_ON(!genpd->opp_table);
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
if (genpd->set_performance_state)
if (genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
goto unlock;
}
@@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np,
i, ret);
goto error;
}
/*
* Save table for faster processing while setting
* performance state.
*/
genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
WARN_ON(!genpd->opp_table);
}
genpd->provider = &np->fwnode;
@@ -1989,8 +2080,10 @@ error:
genpd->provider = NULL;
genpd->has_provider = false;
if (genpd->set_performance_state)
if (genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
}
mutex_unlock(&gpd_list_lock);
@@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np)
if (!gpd->set_performance_state)
continue;
dev_pm_opp_put_opp_table(gpd->opp_table);
dev_pm_opp_of_remove_table(&gpd->dev);
}
}
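
The provider-side contract behind these hunks is small: a genpd that wants performance states only has to supply a set_performance_state() callback and reference an operating-points-v2 table from its DT node; provider registration now adds that table and caches the opp_table pointer for the pstate translation above. Below is a minimal, hypothetical sketch of such a provider (the foo_* names and the trimmed error handling are illustrative, not part of this commit):

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Program the platform-specific performance level (regulator, firmware, ...). */
static int foo_pd_set_perf_state(struct generic_pm_domain *genpd,
                                 unsigned int state)
{
        return 0;
}

static struct generic_pm_domain foo_genpd = {
        .name = "foo-pd",
        .set_performance_state = foo_pd_set_perf_state,
};

static int foo_pd_probe(struct platform_device *pdev)
{
        int ret;

        ret = pm_genpd_init(&foo_genpd, NULL, false);
        if (ret)
                return ret;

        /*
         * With this series, of_genpd_add_provider_simple() also fetches the
         * genpd's OPP table and stores it in foo_genpd.opp_table for later
         * use by dev_pm_opp_xlate_performance_state().
         */
        return of_genpd_add_provider_simple(pdev->dev.of_node, &foo_genpd);
}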


@@ -196,12 +196,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
if (IS_ERR(opp_table))
return 0;
count = opp_table->regulator_count;
/* Regulator may not be required for the device */
if (!count)
if (!opp_table->regulators)
goto put_opp_table;
count = opp_table->regulator_count;
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
if (!uV)
goto put_opp_table;
@@ -843,6 +843,9 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
mutex_init(&opp_table->genpd_virt_dev_lock);
INIT_LIST_HEAD(&opp_table->dev_list);
/* Mark regulator count uninitialized */
opp_table->regulator_count = -1;
opp_dev = _add_opp_dev(dev, opp_table);
if (!opp_dev) {
kfree(opp_table);
@@ -1063,7 +1066,7 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *table)
int count, supply_size;
/* Allocate space for at least one supply */
count = table->regulator_count ? table->regulator_count : 1;
count = table->regulator_count > 0 ? table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * count;
/* allocate new OPP node and supplies structures */
@@ -1084,6 +1087,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
struct regulator *reg;
int i;
if (!opp_table->regulators)
return true;
for (i = 0; i < opp_table->regulator_count; i++) {
reg = opp_table->regulators[i];
@@ -1368,7 +1374,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table)
struct dev_pm_set_opp_data *data;
int len, count = opp_table->regulator_count;
if (WARN_ON(!count))
if (WARN_ON(!opp_table->regulators))
return -EINVAL;
/* space for set_opp_data */
@@ -1465,7 +1471,7 @@ free_regulators:
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = 0;
opp_table->regulator_count = -1;
err:
dev_pm_opp_put_opp_table(opp_table);
@@ -1494,7 +1500,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
kfree(opp_table->regulators);
opp_table->regulators = NULL;
opp_table->regulator_count = 0;
opp_table->regulator_count = -1;
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
@@ -1707,6 +1713,69 @@ void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table,
dev_err(virt_dev, "Failed to find required device entry\n");
}
/**
* dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
* @src_table: OPP table which has dst_table as one of its required OPP table.
* @dst_table: Required OPP table of the src_table.
* @pstate: Current performance state of the src_table.
*
* This Returns pstate of the OPP (present in @dst_table) pointed out by the
* "required-opps" property of the OPP (present in @src_table) which has
* performance state set to @pstate.
*
* Return: Zero or positive performance state on success, otherwise negative
* value on errors.
*/
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
struct opp_table *dst_table,
unsigned int pstate)
{
struct dev_pm_opp *opp;
int dest_pstate = -EINVAL;
int i;
if (!pstate)
return 0;
/*
* Normally the src_table will have the "required_opps" property set to
* point to one of the OPPs in the dst_table, but in some cases the
* genpd and its master have one to one mapping of performance states
* and so none of them have the "required-opps" property set. Return the
* pstate of the src_table as it is in such cases.
*/
if (!src_table->required_opp_count)
return pstate;
for (i = 0; i < src_table->required_opp_count; i++) {
if (src_table->required_opp_tables[i]->np == dst_table->np)
break;
}
if (unlikely(i == src_table->required_opp_count)) {
pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
__func__, src_table, dst_table);
return -EINVAL;
}
mutex_lock(&src_table->lock);
list_for_each_entry(opp, &src_table->opp_list, node) {
if (opp->pstate == pstate) {
dest_pstate = opp->required_opps[i]->pstate;
goto unlock;
}
}
pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
dst_table);
unlock:
mutex_unlock(&src_table->lock);
return dest_pstate;
}
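
The in-tree user of this helper is _genpd_set_performance_state() in the genpd diff above; as a stand-alone illustration, a hypothetical caller translating a subdomain vote into its master's table might look like the sketch below (foo_xlate_to_master and the table names are invented for the example):

#include <linux/pm_opp.h>

static int foo_xlate_to_master(struct opp_table *sub_table,
                               struct opp_table *master_table,
                               unsigned int sub_pstate)
{
        int ret;

        ret = dev_pm_opp_xlate_performance_state(sub_table, master_table,
                                                 sub_pstate);
        if (ret < 0)
                return ret;     /* no matching required-opps entry */

        /*
         * ret is the master's pstate; when sub_table has no required-opps
         * entries the helper assumes a one-to-one mapping and returns
         * sub_pstate unchanged, and a zero input always yields zero.
         */
        return ret;
}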
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
@@ -1733,6 +1802,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
if (!opp_table)
return -ENOMEM;
/* Fix regulator count for dynamic OPPs */
opp_table->regulator_count = 1;
ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
if (ret)
dev_pm_opp_put_opp_table(opp_table);


@@ -114,19 +114,25 @@ static struct device_node *of_parse_required_opp(struct device_node *np,
static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
struct device_node *opp_table_np;
lockdep_assert_held(&opp_table_lock);
opp_table_np = of_get_parent(opp_np);
if (!opp_table_np)
goto err;
/* It is safe to put the node now as all we need now is its address */
of_node_put(opp_table_np);
list_for_each_entry(opp_table, &opp_tables, node) {
opp = _find_opp_of_np(opp_table, opp_np);
if (opp) {
dev_pm_opp_put(opp);
if (opp_table_np == opp_table->np) {
_get_opp_table_kref(opp_table);
return opp_table;
}
}
err:
return ERR_PTR(-ENODEV);
}
@@ -385,12 +391,10 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
struct opp_table *opp_table)
{
u32 *microvolt, *microamp = NULL;
int supplies, vcount, icount, ret, i, j;
int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
struct property *prop = NULL;
char name[NAME_MAX];
supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
/* Search for "opp-microvolt-<name>" */
if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -405,7 +409,13 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
/* Missing property isn't a problem, but an invalid entry is */
if (!prop) {
if (!opp_table->regulator_count)
if (unlikely(supplies == -1)) {
/* Initialize regulator_count */
opp_table->regulator_count = 0;
return 0;
}
if (!supplies)
return 0;
dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
@@ -414,6 +424,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
}
}
if (unlikely(supplies == -1)) {
/* Initialize regulator_count */
supplies = opp_table->regulator_count = 1;
} else if (unlikely(!supplies)) {
dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
return -EINVAL;
}
vcount = of_property_count_u32_elems(opp->np, name);
if (vcount < 0) {
dev_err(dev, "%s: Invalid %s property (%d)\n",
@@ -975,19 +993,19 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
* Returns the performance state of the OPP pointed out by the "required-opps"
* property at @index in @np.
*
* Return: Positive performance state on success, otherwise 0 on errors.
* Return: Zero or positive performance state on success, otherwise negative
* value on errors.
*/
unsigned int of_get_required_opp_performance_state(struct device_node *np,
int index)
int of_get_required_opp_performance_state(struct device_node *np, int index)
{
struct dev_pm_opp *opp;
struct device_node *required_np;
struct opp_table *opp_table;
unsigned int pstate = 0;
int pstate = -EINVAL;
required_np = of_parse_required_opp(np, index);
if (!required_np)
return 0;
return -EINVAL;
opp_table = _find_table_of_opp_np(required_np);
if (IS_ERR(opp_table)) {
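
Since of_get_required_opp_performance_state() can now return a negative errno where it used to return 0, callers checking for failure have to test for negative values; a small, hypothetical example of the updated pattern (foo_read_required_pstate is not part of the commit):

#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/printk.h>

static int foo_read_required_pstate(struct device_node *np, int index)
{
        int pstate;

        pstate = of_get_required_opp_performance_state(np, index);
        if (pstate < 0) {
                pr_err("failed to read required-opps pstate: %d\n", pstate);
                return pstate;
        }

        return pstate;  /* zero or positive performance state */
}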


@@ -145,7 +145,9 @@ enum opp_table_access {
* @prop_name: A name to postfix to many DT properties, while parsing them.
* @clk: Device's clock handle
* @regulators: Supply regulators
* @regulator_count: Number of power supply regulators
* @regulator_count: Number of power supply regulators. Its value can be -1
* (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
* property).
* @genpd_performance_state: Device's power domain support performance state.
* @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
@@ -189,7 +191,7 @@ struct opp_table {
const char *prop_name;
struct clk *clk;
struct regulator **regulators;
unsigned int regulator_count;
int regulator_count;
bool genpd_performance_state;
bool is_genpd;


@@ -73,6 +73,7 @@ struct genpd_power_state {
struct genpd_lock_ops;
struct dev_pm_opp;
struct opp_table;
struct generic_pm_domain {
struct device dev;
@@ -94,6 +95,7 @@ struct generic_pm_domain {
unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct opp_table *opp_table; /* OPP table of the genpd */
unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
@@ -134,6 +136,10 @@ struct gpd_link {
struct list_head master_node;
struct generic_pm_domain *slave;
struct list_head slave_node;
/* Sub-domain's per-master domain performance state */
unsigned int performance_state;
unsigned int prev_performance_state;
};
struct gpd_timing_data {


@@ -128,6 +128,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*s
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index);
void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -280,6 +281,12 @@ static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev
}
static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {}
static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
{
return -ENOTSUPP;
}
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
return -ENOTSUPP;
@@ -314,7 +321,7 @@ void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
unsigned int of_get_required_opp_performance_state(struct device_node *np, int index);
int of_get_required_opp_performance_state(struct device_node *np, int index);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -353,9 +360,9 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
return NULL;
}
static inline unsigned int of_get_required_opp_performance_state(struct device_node *np, int index)
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
return 0;
return -ENOTSUPP;
}
#endif