arch: Cleanup enable/disable_hlt

enable/disable_hlt() does not need to be exported and can be killed on
architectures which do not use it at all.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130321215233.377959540@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Thomas Gleixner 2013-03-21 22:49:31 +01:00
parent 31880c37c1
commit 6546327ad1
7 changed files with 3 additions and 60 deletions

View file

@ -64,16 +64,12 @@ void disable_hlt(void)
hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);
void enable_hlt(void)
{
hlt_counter--;
BUG_ON(hlt_counter < 0);
}
EXPORT_SYMBOL(enable_hlt);
static int __init nohlt_setup(char *__unused)
{
hlt_counter = 1;

View file

@ -20,16 +20,14 @@
extern void stop_watchdog(void);
extern int cris_hlt_counter;
/* We use this if we don't have any better idle routine. */
void default_idle(void)
{
local_irq_disable();
if (!need_resched() && !cris_hlt_counter) {
/* Halt until exception. */
if (!need_resched()) {
/* Halt until exception. */
__asm__ volatile("ei \n\t"
"halt ");
"halt ");
}
local_irq_enable();
}

View file

@ -65,13 +65,6 @@ static inline void release_thread(struct task_struct *dead_task)
#define cpu_relax() barrier()
/*
* disable hlt during certain critical i/o operations
*/
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
void default_idle(void);
#endif /* __ASM_CRIS_PROCESSOR_H */

View file

@ -29,31 +29,6 @@
//#define DEBUG
/*
* The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
* there would ever be a halt sequence (for power save when idle) with
* some largish delay when halting or resuming *and* a driver that can't
* afford that delay. The hlt_counter would then be checked before
* executing the halt sequence, and the driver marks the unhaltable
* region by enable_hlt/disable_hlt.
*/
int cris_hlt_counter=0;
void disable_hlt(void)
{
cris_hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);
void enable_hlt(void)
{
cris_hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
extern void default_idle(void);
void (*pm_power_off)(void);

View file

@ -160,8 +160,6 @@ unsigned long get_wchan(struct task_struct *p);
# define STACK_TOP TASK_SIZE
# define STACK_TOP_MAX STACK_TOP
void disable_hlt(void);
void enable_hlt(void);
void default_idle(void);
#ifdef CONFIG_DEBUG_FS

View file

@ -46,18 +46,6 @@ EXPORT_SYMBOL(pm_power_off);
static int hlt_counter = 1;
void disable_hlt(void)
{
hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);
void enable_hlt(void)
{
hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
static int __init nohlt_setup(char *__unused)
{
hlt_counter = 1;

View file

@ -935,8 +935,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
int timeout;
#ifdef CONFIG_HOTPLUG_CPU
if (num_online_cpus() == 1)
disable_hlt();
if (sleep_mode[cpu])
run_wakeup_cpu(cpu);
#endif /* CONFIG_HOTPLUG_CPU */
@ -1003,9 +1001,6 @@ int __cpu_disable(void)
void __cpu_die(unsigned int cpu)
{
run_sleep_cpu(cpu);
if (num_online_cpus() == 1)
enable_hlt();
}
#ifdef CONFIG_MN10300_CACHE_ENABLED