[PATCH] x86: more asm cleanups

Some more assembler cleanups I noticed along the way.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Zachary Amsden 2005-09-03 15:56:42 -07:00 committed by Linus Torvalds
parent 4f0cb8d978
commit f2ab446124
8 changed files with 35 additions and 50 deletions

View file

@@ -82,16 +82,13 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
  */
 static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
-	unsigned int eax;
+	unsigned int eax, ebx, ecx, edx;
 
 	if (c->cpuid_level < 4)
 		return 1;
 
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (4), "c" (0)
-		: "bx", "dx");
+	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
+	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
 	if (eax & 0x1f)
 		return ((eax >> 26) + 1);
 	else
View file

@@ -153,7 +153,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
 	disable_local_APIC();
 	atomic_dec(&waiting_for_crash_ipi);
 	/* Assume hlt works */
-	__asm__("hlt");
+	halt();
 	for(;;);
 
 	return 1;

View file

@@ -93,10 +93,7 @@ static void set_idt(void *newidt, __u16 limit)
 	curidt.size    = limit;
 	curidt.address = (unsigned long)newidt;
 
-	__asm__ __volatile__ (
-		"lidtl %0\n"
-		: : "m" (curidt)
-		);
+	load_idt(&curidt);
 };
 
@@ -108,10 +105,7 @@ static void set_gdt(void *newgdt, __u16 limit)
 	curgdt.size    = limit;
 	curgdt.address = (unsigned long)newgdt;
 
-	__asm__ __volatile__ (
-		"lgdtl %0\n"
-		: : "m" (curgdt)
-		);
+	load_gdt(&curgdt);
 };
 
 static void load_segments(void)

View file

@@ -46,23 +46,13 @@
 
 static struct class *msr_class;
 
-/* Note: "err" is handled in a funny way below. Otherwise one version
-   of gcc or another breaks. */
-
 static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
 {
 	int err;
 
-	asm volatile ("1:	wrmsr\n"
-		      "2:\n"
-		      ".section .fixup,\"ax\"\n"
-		      "3:	movl %4,%0\n"
-		      "	jmp 2b\n"
-		      ".previous\n"
-		      ".section __ex_table,\"a\"\n"
-		      "	.align 4\n" "	.long 1b,3b\n" ".previous":"=&bDS" (err)
-		      :"a"(eax), "d"(edx), "c"(reg), "i"(-EIO), "0"(0));
-
+	err = wrmsr_safe(reg, eax, edx);
+	if (err)
+		err = -EIO;
 	return err;
 }
 
@@ -70,18 +60,9 @@ static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
 {
 	int err;
 
-	asm volatile ("1:	rdmsr\n"
-		      "2:\n"
-		      ".section .fixup,\"ax\"\n"
-		      "3:	movl %4,%0\n"
-		      "	jmp 2b\n"
-		      ".previous\n"
-		      ".section __ex_table,\"a\"\n"
-		      "	.align 4\n"
-		      "	.long 1b,3b\n"
-		      ".previous":"=&bDS" (err), "=a"(*eax), "=d"(*edx)
-		      :"c"(reg), "i"(-EIO), "0"(0));
-
+	err = rdmsr_safe(reg, eax, edx);
+	if (err)
+		err = -EIO;
 	return err;
 }

View file

@@ -164,7 +164,7 @@ static inline void play_dead(void)
 	 */
 	local_irq_disable();
 	while (1)
-		__asm__ __volatile__("hlt":::"memory");
+		halt();
 }
 #else
 static inline void play_dead(void)

View file

@@ -234,10 +234,9 @@ voyager_power_off(void)
 #endif
 	}
 	/* and wait for it to happen */
-	for(;;) {
-		__asm("cli");
-		__asm("hlt");
-	}
+	local_irq_disable();
+	for(;;)
+		halt();
 }
 
 /* copied from process.c */
@@ -278,10 +277,9 @@ machine_restart(char *cmd)
 		outb(basebd | 0x08, VOYAGER_MC_SETUP);
 		outb(0x02, catbase + 0x21);
 	}
-	for(;;) {
-		asm("cli");
-		asm("hlt");
-	}
+	local_irq_disable();
+	for(;;)
+		halt();
 }
 
 void

View file

@@ -1015,7 +1015,7 @@ smp_stop_cpu_function(void *dummy)
 	cpu_clear(smp_processor_id(), cpu_online_map);
 	local_irq_disable();
 	for(;;)
-		__asm__("hlt");
+		halt();
 }
 
 static DEFINE_SPINLOCK(call_lock);

View file

@@ -47,6 +47,21 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
 		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT));\
 	ret__; })
 
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ int ret__;		\
+	asm volatile("2: rdmsr ; xorl %0,%0\n"		\
+		     "1:\n\t"				\
+		     ".section .fixup,\"ax\"\n\t"	\
+		     "3: movl %4,%0 ; jmp 1b\n\t"	\
+		     ".previous\n\t"			\
+		     ".section __ex_table,\"a\"\n"	\
+		     " .align 4\n\t"			\
+		     " .long 2b,3b\n\t"			\
+		     ".previous"			\
+		     : "=r" (ret__), "=a" (*(a)), "=d" (*(b)) \
+		     : "c" (msr), "i" (-EFAULT));\
+	ret__; })
+
 #define rdtsc(low,high) \
      __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))