
sched/idle: Reflow cpuidle_idle_call()

Apply goto to reduce lines and nesting levels.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-cc6vb0snt3sr7op6rlbfeqfh@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra 2014-04-11 13:55:48 +02:00 committed by Ingo Molnar
parent c444117f0f
commit 37352273ad
1 changed file with 57 additions and 72 deletions


@@ -73,7 +73,7 @@ static int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-	int next_state, entered_state, ret;
+	int next_state, entered_state;
 	bool broadcast;
 
 	/*
@@ -102,90 +102,75 @@ static int cpuidle_idle_call(void)
 	 * Check if the cpuidle framework is ready, otherwise fallback
 	 * to the default arch specific idle method
 	 */
-	ret = cpuidle_enabled(drv, dev);
-
-	if (!ret) {
+	if (cpuidle_enabled(drv, dev)) {
+use_default:
 		/*
-		 * Ask the governor to choose an idle state it thinks
-		 * it is convenient to go to. There is *always* a
-		 * convenient idle state
+		 * We can't use the cpuidle framework, let's use the default
+		 * idle routine.
 		 */
-		next_state = cpuidle_select(drv, dev);
-
-		/*
-		 * The idle task must be scheduled, it is pointless to
-		 * go to idle, just update no idle residency and get
-		 * out of this function
-		 */
-		if (current_clr_polling_and_test()) {
-			dev->last_residency = 0;
-			entered_state = next_state;
+		if (current_clr_polling_and_test())
 			local_irq_enable();
-		} else {
-			broadcast = !!(drv->states[next_state].flags &
-					CPUIDLE_FLAG_TIMER_STOP);
+		else
+			arch_cpu_idle();
 
-			if (broadcast) {
-				/*
-				 * Tell the time framework to switch
-				 * to a broadcast timer because our
-				 * local timer will be shutdown. If a
-				 * local timer is used from another
-				 * cpu as a broadcast timer, this call
-				 * may fail if it is not available
-				 */
-				ret = clockevents_notify(
-					CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-					&dev->cpu);
-			}
-
-			if (!ret) {
-				trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
-				/*
-				 * Enter the idle state previously
-				 * returned by the governor
-				 * decision. This function will block
-				 * until an interrupt occurs and will
-				 * take care of re-enabling the local
-				 * interrupts
-				 */
-				entered_state = cpuidle_enter(drv, dev,
-							      next_state);
-
-				trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
-						       dev->cpu);
-
-				if (broadcast)
-					clockevents_notify(
-						CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-						&dev->cpu);
-
-				/*
-				 * Give the governor an opportunity to reflect on the
-				 * outcome
-				 */
-				cpuidle_reflect(dev, entered_state);
-			}
-		}
+		goto exit_idle;
 	}
 
 	/*
-	 * We can't use the cpuidle framework, let's use the default
-	 * idle routine
+	 * Ask the governor to choose an idle state it thinks
+	 * it is convenient to go to. There is *always* a
+	 * convenient idle state
 	 */
-	if (ret) {
-		if (!current_clr_polling_and_test())
-			arch_cpu_idle();
-		else
-			local_irq_enable();
+	next_state = cpuidle_select(drv, dev);
+
+	/*
+	 * The idle task must be scheduled, it is pointless to
+	 * go to idle, just update no idle residency and get
+	 * out of this function
+	 */
+	if (current_clr_polling_and_test()) {
+		dev->last_residency = 0;
+		entered_state = next_state;
+		local_irq_enable();
+		goto exit_idle;
 	}
 
+	broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+	/*
+	 * Tell the time framework to switch to a broadcast timer
+	 * because our local timer will be shutdown. If a local timer
+	 * is used from another cpu as a broadcast timer, this call may
+	 * fail if it is not available
+	 */
+	if (broadcast &&
+	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+		goto use_default;
+
+	trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+	/*
+	 * Enter the idle state previously returned by the governor decision.
+	 * This function will block until an interrupt occurs and will take
+	 * care of re-enabling the local interrupts
+	 */
+	entered_state = cpuidle_enter(drv, dev, next_state);
+
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+
+	if (broadcast)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+	/*
+	 * Give the governor an opportunity to reflect on the outcome
+	 */
+	cpuidle_reflect(dev, entered_state);
+
+exit_idle:
 	__current_set_polling();
 
 	/*
-	 * It is up to the idle functions to enable back the local
-	 * interrupt
+	 * It is up to the idle functions to reenable local interrupts
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
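
For readers less familiar with the pattern applied above, here is a minimal, self-contained sketch of the control flow the patch introduces. The helper names (fake_cpuidle_ready, fake_broadcast_failed, idle_flow_sketch) are made up for illustration and are not kernel APIs; the point is only that a single fallback label serves both the early "framework unavailable" check and the later broadcast-timer failure, which is what removes the nesting.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the real framework hooks; all names below are hypothetical. */
static bool fake_cpuidle_ready(void)    { return true;  }  /* like cpuidle_enabled() succeeding */
static bool fake_broadcast_failed(void) { return false; }  /* like clockevents_notify() failing  */

static void idle_flow_sketch(void)
{
	if (!fake_cpuidle_ready()) {
use_default:
		/* Shared fallback path: taken when the framework is
		 * unavailable or when broadcast setup fails later. */
		puts("default arch idle");
		goto exit_idle;
	}

	if (fake_broadcast_failed())
		goto use_default;	/* late failure reuses the same fallback */

	puts("cpuidle enter");

exit_idle:
	puts("restore polling and return");
}

int main(void)
{
	idle_flow_sketch();
	return 0;
}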