
Merge back earlier cpuidle material for v4.10.

Rafael J. Wysocki 2016-12-01 14:39:51 +01:00
commit 4e28ec3d5f
13 changed files with 143 additions and 81 deletions

MAINTAINERS

@@ -3380,6 +3380,7 @@ M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
B: https://bugzilla.kernel.org
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
@@ -6289,9 +6290,11 @@ S: Maintained
F: drivers/platform/x86/intel-vbtn.c
INTEL IDLE DRIVER
M: Jacob Pan <jacob.jun.pan@linux.intel.com>
M: Len Brown <lenb@kernel.org>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
B: https://bugzilla.kernel.org
S: Supported
F: drivers/idle/intel_idle.c

drivers/cpuidle/cpuidle-powernv.c

@@ -22,7 +22,7 @@
#define POWERNV_THRESHOLD_LATENCY_NS 200000
struct cpuidle_driver powernv_idle_driver = {
static struct cpuidle_driver powernv_idle_driver = {
.name = "powernv_idle",
.owner = THIS_MODULE,
};

drivers/cpuidle/cpuidle.c

@@ -97,7 +97,17 @@ static int find_deepest_state(struct cpuidle_driver *drv,
return ret;
}
#ifdef CONFIG_SUSPEND
/* Set the current cpu to use the deepest idle state, override governors */
void cpuidle_use_deepest_state(bool enable)
{
struct cpuidle_device *dev;
preempt_disable();
dev = cpuidle_get_device();
dev->use_deepest_state = enable;
preempt_enable();
}
/**
* cpuidle_find_deepest_state - Find the deepest available idle state.
* @drv: cpuidle driver for the given CPU.
@@ -109,6 +119,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}
#ifdef CONFIG_SUSPEND
static void enter_freeze_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
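
The new cpuidle_use_deepest_state() helper only flags the cpuidle device of the CPU it runs on; the kernel/sched/idle.c changes later in this commit are what act on that flag. A minimal caller sketch, assuming a task already pinned to one CPU (the wrapper function is illustrative only; play_idle() below is the real user added by this series):

#include <linux/cpuidle.h>

/* Illustrative only: bracket a forced-idle period on the current CPU. */
static void forced_idle_section(void)
{
	cpuidle_use_deepest_state(true);	/* idle loop bypasses the governor */
	/* ... run the idle loop here, e.g. via play_idle() ... */
	cpuidle_use_deepest_state(false);	/* back to governor-selected states */
}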

drivers/cpuidle/dt_idle_states.c

@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
* state enter function.
*/
idle_state->enter = match_id->data;
/*
* Since this is not a "coupled" state, it's safe to assume interrupts
* won't be enabled when it exits allowing the tick to be frozen
* safely. So enter() can be also enter_freeze() callback.
*/
idle_state->enter_freeze = match_id->data;
err = of_property_read_u32(state_node, "wakeup-latency-us",
&idle_state->exit_latency);
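
Reusing the enter() callback as enter_freeze() is only valid because, as the comment above notes, these DT-described states never re-enable interrupts before returning, so the frozen tick cannot be unfrozen behind cpuidle's back. A hedged sketch of a platform callback that satisfies this contract; my_plat_suspend() and the surrounding names are placeholders, not taken from an in-tree driver:

#include <linux/cpuidle.h>

/* Hypothetical firmware/PSCI-style call that enters the requested state. */
static void my_plat_suspend(int index)
{
}

static int my_plat_idle_enter(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/*
	 * IRQs are disabled on entry and stay disabled until return, which
	 * is what makes this callback usable as enter_freeze() as well.
	 */
	my_plat_suspend(index);
	return index;		/* report the state actually entered */
}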

drivers/cpuidle/governor.c

@@ -9,7 +9,6 @@
*/
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include "cpuidle.h"
@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
if (cpuidle_curr_governor) {
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_disable_device(dev);
module_put(cpuidle_curr_governor->owner);
}
cpuidle_curr_governor = gov;
if (gov) {
if (!try_module_get(cpuidle_curr_governor->owner))
return -EINVAL;
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();

drivers/cpuidle/governors/ladder.c

@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
.enable = ladder_enable_device,
.select = ladder_select_state,
.reflect = ladder_reflect,
.owner = THIS_MODULE,
};
/**

drivers/cpuidle/governors/menu.c

@@ -19,7 +19,6 @@
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>
/*
* Please note when changing the tuning values:
@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
.enable = menu_enable_device,
.select = menu_select,
.reflect = menu_reflect,
.owner = THIS_MODULE,
};
/**

include/linux/cpu.h

@@ -245,6 +245,8 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
void play_idle(unsigned long duration_ms);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);

include/linux/cpuidle.h

@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int use_deepest_state:1;
unsigned int cpu;
int last_residency;
@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(bool enable)
{
}
#endif
/* kernel/sched/idle.c */
@@ -235,8 +240,6 @@ struct cpuidle_governor {
int (*select) (struct cpuidle_driver *drv,
struct cpuidle_device *dev);
void (*reflect) (struct cpuidle_device *dev, int index);
struct module *owner;
};
#ifdef CONFIG_CPU_IDLE
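
With the ->owner field removed from struct cpuidle_governor (and THIS_MODULE dropped from the ladder and menu governors above), governors are treated as always built in. A hedged sketch of what a governor registration looks like after this change; the "example" governor and its do-nothing callbacks are placeholders, and only the struct layout and registration call reflect this commit:

#include <linux/cpuidle.h>
#include <linux/init.h>

static int example_enable_device(struct cpuidle_driver *drv,
				 struct cpuidle_device *dev)
{
	return 0;
}

static int example_select_state(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	return 0;	/* always pick the shallowest state */
}

static void example_reflect(struct cpuidle_device *dev, int index)
{
}

static struct cpuidle_governor example_governor = {
	.name		= "example",
	.rating		= 10,
	.enable		= example_enable_device,
	.select		= example_select_state,
	.reflect	= example_reflect,
	/* no .owner: governors can no longer be modules */
};

static int __init example_governor_init(void)
{
	return cpuidle_register_governor(&example_governor);
}
postcore_initcall(example_governor_init);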

include/linux/sched.h

@@ -2254,6 +2254,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
/*
* Per process flags
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
@@ -2611,7 +2612,7 @@ extern struct task_struct *idle_task(int cpu);
*/
static inline bool is_idle_task(const struct task_struct *p)
{
return p->pid == 0;
return !!(p->flags & PF_IDLE);
}
extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

kernel/fork.c

@@ -1540,7 +1540,7 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cleanup_count;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
p->flags |= PF_FORKNOEXEC;
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);

kernel/sched/core.c

@@ -5279,6 +5279,7 @@ void init_idle(struct task_struct *idle, int cpu)
__sched_fork(0, idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
idle->flags |= PF_IDLE;
kasan_unpoison_task_stack(idle);

kernel/sched/idle.c

@@ -164,11 +164,14 @@ static void cpuidle_idle_call(void)
* timekeeping to prevent timer interrupts from kicking us out of idle
* until a proper wakeup interrupt happens.
*/
if (idle_should_freeze()) {
entered_state = cpuidle_enter_freeze(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;
if (idle_should_freeze() || dev->use_deepest_state) {
if (idle_should_freeze()) {
entered_state = cpuidle_enter_freeze(drv, dev);
if (entered_state > 0) {
local_irq_enable();
goto exit_idle;
}
}
next_state = cpuidle_find_deepest_state(drv, dev);
@@ -202,76 +205,65 @@ exit_idle:
*
* Called with polling cleared.
*/
static void cpu_idle_loop(void)
static void do_idle(void)
{
int cpu = smp_processor_id();
/*
* If the arch has a polling bit, we maintain an invariant:
*
* Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
* rq->idle). This means that, if rq->idle has the polling bit set,
* then setting need_resched is guaranteed to cause the CPU to
* reschedule.
*/
while (1) {
/*
* If the arch has a polling bit, we maintain an invariant:
*
* Our polling bit is clear if we're not scheduled (i.e. if
* rq->curr != rq->idle). This means that, if rq->idle has
* the polling bit set, then setting need_resched is
* guaranteed to cause the cpu to reschedule.
*/
__current_set_polling();
tick_nohz_idle_enter();
__current_set_polling();
quiet_vmstat();
tick_nohz_idle_enter();
while (!need_resched()) {
check_pgt_cache();
rmb();
while (!need_resched()) {
check_pgt_cache();
rmb();
if (cpu_is_offline(cpu)) {
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
local_irq_disable();
arch_cpu_idle_enter();
/*
* In poll mode we reenable interrupts and spin.
*
* Also if we detected in the wakeup from idle
* path that the tick broadcast device expired
* for us, we don't want to go deep idle as we
* know that the IPI is going to arrive right
* away
*/
if (cpu_idle_force_poll || tick_check_broadcast_expired())
cpu_idle_poll();
else
cpuidle_idle_call();
arch_cpu_idle_exit();
if (cpu_is_offline(smp_processor_id())) {
cpuhp_report_idle_dead();
arch_cpu_idle_dead();
}
/*
* Since we fell out of the loop above, we know
* TIF_NEED_RESCHED must be set, propagate it into
* PREEMPT_NEED_RESCHED.
*
* This is required because for polling idle loops we will
* not have had an IPI to fold the state for us.
*/
preempt_set_need_resched();
tick_nohz_idle_exit();
__current_clr_polling();
local_irq_disable();
arch_cpu_idle_enter();
/*
* We promise to call sched_ttwu_pending and reschedule
* if need_resched is set while polling is set. That
* means that clearing polling needs to be visible
* before doing these things.
* In poll mode we reenable interrupts and spin. Also if we
* detected in the wakeup from idle path that the tick
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
smp_mb__after_atomic();
sched_ttwu_pending();
schedule_preempt_disabled();
if (cpu_idle_force_poll || tick_check_broadcast_expired())
cpu_idle_poll();
else
cpuidle_idle_call();
arch_cpu_idle_exit();
}
/*
* Since we fell out of the loop above, we know TIF_NEED_RESCHED must
* be set, propagate it into PREEMPT_NEED_RESCHED.
*
* This is required because for polling idle loops we will not have had
* an IPI to fold the state for us.
*/
preempt_set_need_resched();
tick_nohz_idle_exit();
__current_clr_polling();
/*
* We promise to call sched_ttwu_pending() and reschedule if
* need_resched() is set while polling is set. That means that clearing
* polling needs to be visible before doing these things.
*/
smp_mb__after_atomic();
sched_ttwu_pending();
schedule_preempt_disabled();
}
bool cpu_in_idle(unsigned long pc)
@@ -280,6 +272,56 @@ bool cpu_in_idle(unsigned long pc)
pc < (unsigned long)__cpuidle_text_end;
}
struct idle_timer {
struct hrtimer timer;
int done;
};
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
struct idle_timer *it = container_of(timer, struct idle_timer, timer);
WRITE_ONCE(it->done, 1);
set_tsk_need_resched(current);
return HRTIMER_NORESTART;
}
void play_idle(unsigned long duration_ms)
{
struct idle_timer it;
/*
* Only FIFO tasks can disable the tick since they don't need the forced
* preemption.
*/
WARN_ON_ONCE(current->policy != SCHED_FIFO);
WARN_ON_ONCE(current->nr_cpus_allowed != 1);
WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
WARN_ON_ONCE(!duration_ms);
rcu_sleep_check();
preempt_disable();
current->flags |= PF_IDLE;
cpuidle_use_deepest_state(true);
it.done = 0;
hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
it.timer.function = idle_inject_timer_fn;
hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
while (!READ_ONCE(it.done))
do_idle();
cpuidle_use_deepest_state(false);
current->flags &= ~PF_IDLE;
preempt_fold_need_resched();
preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);
void cpu_startup_entry(enum cpuhp_state state)
{
/*
@@ -299,5 +341,6 @@ void cpu_startup_entry(enum cpuhp_state state)
#endif
arch_cpu_idle_prepare();
cpuhp_online_idle(state);
cpu_idle_loop();
while (1)
do_idle();
}
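
play_idle() warns unless it is called from a one-CPU SCHED_FIFO kthread with PF_KTHREAD and PF_NO_SETAFFINITY set, so its intended callers are dedicated idle-injection threads. A hedged caller sketch along those lines; all names are illustrative (the idle-injection rework of intel_powerclamp in this same cycle is the in-tree user, but this is not its code):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/err.h>

static int idle_inject_fn(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };

	/* Satisfy the SCHED_FIFO check in play_idle(). */
	sched_setscheduler(current, SCHED_FIFO, &param);

	while (!kthread_should_stop()) {
		play_idle(10);		/* ~10 ms of forced deepest idle on this CPU */
		msleep(40);		/* then let normal work run for a while */
	}
	return 0;
}

static struct task_struct *start_idle_injection(int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(idle_inject_fn, NULL, "idle_inject/%d", cpu);
	if (IS_ERR(tsk))
		return tsk;

	/* Pins the thread to @cpu and sets PF_NO_SETAFFINITY for us. */
	kthread_bind(tsk, cpu);
	wake_up_process(tsk);
	return tsk;
}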