x86_64: move kernel

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Thomas Gleixner 2007-10-11 11:17:24 +02:00
parent 2db55d344e
commit 250c22777f
74 changed files with 457 additions and 479 deletions


@@ -174,7 +174,7 @@ no_longmode:
 hlt
 jmp 1b
-#include "../../../x86_64/kernel/verify_cpu_64.S"
+#include "../../kernel/verify_cpu_64.S"
 /* Be careful here startup_64 needs to be at a predictable
 * address so I can export it in an ELF header. Bootloaders


@@ -1,5 +1,5 @@
 ifeq ($(CONFIG_X86_32),y)
 include ${srctree}/arch/x86/kernel/Makefile_32
 else
-include ${srctree}/arch/x86_64/kernel/Makefile_64
+include ${srctree}/arch/x86/kernel/Makefile_64
 endif


@@ -83,6 +83,4 @@ $(obj)/vsyscall-syms.o: $(src)/vsyscall_32.lds \
 $(obj)/vsyscall-sysenter_32.o $(obj)/vsyscall-note_32.o FORCE
 $(call if_changed,syscall)
-k8-y += ../../x86_64/kernel/k8.o
-stacktrace-y += ../../x86_64/kernel/stacktrace.o


@@ -49,15 +49,6 @@ obj-y += pcspeaker.o
CFLAGS_vsyscall_64.o := $(PROFILING) -g0
therm_throt-y += ../../x86/kernel/cpu/mcheck/therm_throt.o
bootflag-y += ../../x86/kernel/bootflag.o
cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../x86/kernel/cpuid.o
topology-y += ../../x86/kernel/topology.o
microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../x86/kernel/microcode.o
intel_cacheinfo-y += ../../x86/kernel/cpu/intel_cacheinfo.o
addon_cpuid_features-y += ../../x86/kernel/cpu/addon_cpuid_features.o
quirks-y += ../../x86/kernel/quirks.o
i8237-y += ../../x86/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../x86/kernel/msr.o
alternative-y += ../../x86/kernel/alternative.o
pcspeaker-y += ../../x86/kernel/pcspeaker.o
perfctr-watchdog-y += ../../x86/kernel/cpu/perfctr-watchdog.o


@@ -269,7 +269,7 @@ no_longmode:
 movb $0xbc,%al ; outb %al,$0x80
 jmp no_longmode
-#include "../../../x86_64/kernel/verify_cpu_64.S"
+#include "../verify_cpu_64.S"
 /* This code uses an extended set of video mode numbers. These include:
 * Aliases for standard modes


@@ -1,2 +1,259 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/screen_info.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
#include <xen/hvc-console.h>
#include "../../x86_64/kernel/early_printk.c"
/* Simple VGA output */
#ifdef __i386__
#include <asm/setup.h>
#else
#include <asm/bootsetup.h>
#endif
#define VGABASE (__ISA_IO_base + 0xb8000)
static int max_ypos = 25, max_xpos = 80;
static int current_ypos = 25, current_xpos = 0;
static void early_vga_write(struct console *con, const char *str, unsigned n)
{
char c;
int i, k, j;
while ((c = *str++) != '\0' && n-- > 0) {
if (current_ypos >= max_ypos) {
/* scroll 1 line up */
for (k = 1, j = 0; k < max_ypos; k++, j++) {
for (i = 0; i < max_xpos; i++) {
writew(readw(VGABASE+2*(max_xpos*k+i)),
VGABASE + 2*(max_xpos*j + i));
}
}
for (i = 0; i < max_xpos; i++)
writew(0x720, VGABASE + 2*(max_xpos*j + i));
current_ypos = max_ypos-1;
}
if (c == '\n') {
current_xpos = 0;
current_ypos++;
} else if (c != '\r') {
writew(((0x7 << 8) | (unsigned short) c),
VGABASE + 2*(max_xpos*current_ypos +
current_xpos++));
if (current_xpos >= max_xpos) {
current_xpos = 0;
current_ypos++;
}
}
}
}
static struct console early_vga_console = {
.name = "earlyvga",
.write = early_vga_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
static int early_serial_base = 0x3f8; /* ttyS0 */
#define XMTRDY 0x20
#define DLAB 0x80
#define TXR 0 /* Transmit register (WRITE) */
#define RXR 0 /* Receive register (READ) */
#define IER 1 /* Interrupt Enable */
#define IIR 2 /* Interrupt ID */
#define FCR 2 /* FIFO control */
#define LCR 3 /* Line control */
#define MCR 4 /* Modem control */
#define LSR 5 /* Line Status */
#define MSR 6 /* Modem Status */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
static int early_serial_putc(unsigned char ch)
{
unsigned timeout = 0xffff;
while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
outb(ch, early_serial_base + TXR);
return timeout ? 0 : -1;
}
static void early_serial_write(struct console *con, const char *s, unsigned n)
{
while (*s && n-- > 0) {
if (*s == '\n')
early_serial_putc('\r');
early_serial_putc(*s);
s++;
}
}
#define DEFAULT_BAUD 9600
static __init void early_serial_init(char *s)
{
unsigned char c;
unsigned divisor;
unsigned baud = DEFAULT_BAUD;
char *e;
if (*s == ',')
++s;
if (*s) {
unsigned port;
if (!strncmp(s,"0x",2)) {
early_serial_base = simple_strtoul(s, &e, 16);
} else {
static int bases[] = { 0x3f8, 0x2f8 };
if (!strncmp(s,"ttyS",4))
s += 4;
port = simple_strtoul(s, &e, 10);
if (port > 1 || s == e)
port = 0;
early_serial_base = bases[port];
}
s += strcspn(s, ",");
if (*s == ',')
s++;
}
outb(0x3, early_serial_base + LCR); /* 8n1 */
outb(0, early_serial_base + IER); /* no interrupt */
outb(0, early_serial_base + FCR); /* no fifo */
outb(0x3, early_serial_base + MCR); /* DTR + RTS */
if (*s) {
baud = simple_strtoul(s, &e, 0);
if (baud == 0 || s == e)
baud = DEFAULT_BAUD;
}
divisor = 115200 / baud;
c = inb(early_serial_base + LCR);
outb(c | DLAB, early_serial_base + LCR);
outb(divisor & 0xff, early_serial_base + DLL);
outb((divisor >> 8) & 0xff, early_serial_base + DLH);
outb(c & ~DLAB, early_serial_base + LCR);
}
static struct console early_serial_console = {
.name = "earlyser",
.write = early_serial_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/* Console interface to a host file on AMD's SimNow! */
static int simnow_fd;
enum {
MAGIC1 = 0xBACCD00A,
MAGIC2 = 0xCA110000,
XOPEN = 5,
XWRITE = 4,
};
static noinline long simnow(long cmd, long a, long b, long c)
{
long ret;
asm volatile("cpuid" :
"=a" (ret) :
"b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
return ret;
}
static void __init simnow_init(char *str)
{
char *fn = "klog";
if (*str == '=')
fn = ++str;
/* error ignored */
simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
}
static void simnow_write(struct console *con, const char *s, unsigned n)
{
simnow(XWRITE, simnow_fd, (unsigned long)s, n);
}
static struct console simnow_console = {
.name = "simnow",
.write = simnow_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/* Direct interface for emergencies */
struct console *early_console = &early_vga_console;
static int early_console_initialized = 0;
void early_printk(const char *fmt, ...)
{
char buf[512];
int n;
va_list ap;
va_start(ap,fmt);
n = vscnprintf(buf,512,fmt,ap);
early_console->write(early_console,buf,n);
va_end(ap);
}
static int __initdata keep_early;
static int __init setup_early_printk(char *buf)
{
if (!buf)
return 0;
if (early_console_initialized)
return 0;
early_console_initialized = 1;
if (strstr(buf, "keep"))
keep_early = 1;
if (!strncmp(buf, "serial", 6)) {
early_serial_init(buf + 6);
early_console = &early_serial_console;
} else if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf);
early_console = &early_serial_console;
} else if (!strncmp(buf, "vga", 3)
&& SCREEN_INFO.orig_video_isVGA == 1) {
max_xpos = SCREEN_INFO.orig_video_cols;
max_ypos = SCREEN_INFO.orig_video_lines;
current_ypos = SCREEN_INFO.orig_y;
early_console = &early_vga_console;
} else if (!strncmp(buf, "simnow", 6)) {
simnow_init(buf + 6);
early_console = &simnow_console;
keep_early = 1;
#ifdef CONFIG_HVC_XEN
} else if (!strncmp(buf, "xen", 3)) {
early_console = &xenboot_console;
#endif
}
if (keep_early)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
return 0;
}
early_param("earlyprintk", setup_early_printk);


@@ -1 +1,187 @@
#include "../../x86_64/kernel/tsc_sync.c"
/*
* arch/x86_64/kernel/tsc_sync.c: check TSC synchronization.
*
* Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
*
* We check whether all boot CPUs have their TSC's synchronized,
* print a warning if not and turn off the TSC clock-source.
*
* The warp-check is point-to-point between two CPUs, the CPU
* initiating the bootup is the 'source CPU', the freshly booting
* CPU is the 'target CPU'.
*
* Only two CPUs may participate - they can enter in any order.
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
/*
* Entry/exit counters that make sure that both CPUs
* run the measurement code at once:
*/
static __cpuinitdata atomic_t start_count;
static __cpuinitdata atomic_t stop_count;
/*
* We use a raw spinlock in this exceptional case, because
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
static __cpuinitdata int nr_warps;
/*
* TSC-warp measurement loop running on both CPUs:
*/
static __cpuinit void check_tsc_warp(void)
{
cycles_t start, now, prev, end;
int i;
start = get_cycles_sync();
/*
* The measurement runs for 20 msecs:
*/
end = start + tsc_khz * 20ULL;
now = start;
for (i = 0; ; i++) {
/*
* We take the global lock, measure TSC, save the
* previous TSC that was measured (possibly on
* another CPU) and update the previous TSC timestamp.
*/
__raw_spin_lock(&sync_lock);
prev = last_tsc;
now = get_cycles_sync();
last_tsc = now;
__raw_spin_unlock(&sync_lock);
/*
* Be nice every now and then (and also check whether
* measurement is done [we also insert a 100 million
* loops safety exit, so we dont lock up in case the
* TSC readout is totally broken]):
*/
if (unlikely(!(i & 7))) {
if (now > end || i > 100000000)
break;
cpu_relax();
touch_nmi_watchdog();
}
/*
* Outside the critical section we can now see whether
* we saw a time-warp of the TSC going backwards:
*/
if (unlikely(prev > now)) {
__raw_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
nr_warps++;
__raw_spin_unlock(&sync_lock);
}
}
}
/*
* Source CPU calls into this - it waits for the freshly booted
* target CPU to arrive and then starts the measurement:
*/
void __cpuinit check_tsc_sync_source(int cpu)
{
int cpus = 2;
/*
* No need to check if we already know that the TSC is not
* synchronized:
*/
if (unsynchronized_tsc())
return;
printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
smp_processor_id(), cpu);
/*
* Reset it - in case this is a second bootup:
*/
atomic_set(&stop_count, 0);
/*
* Wait for the target to arrive:
*/
while (atomic_read(&start_count) != cpus-1)
cpu_relax();
/*
* Trigger the target to continue into the measurement too:
*/
atomic_inc(&start_count);
check_tsc_warp();
while (atomic_read(&stop_count) != cpus-1)
cpu_relax();
/*
* Reset it - just in case we boot another CPU later:
*/
atomic_set(&start_count, 0);
if (nr_warps) {
printk("\n");
printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
" turning off TSC clock.\n", max_warp);
mark_tsc_unstable("check_tsc_sync_source failed");
nr_warps = 0;
max_warp = 0;
last_tsc = 0;
} else {
printk(" passed.\n");
}
/*
* Let the target continue with the bootup:
*/
atomic_inc(&stop_count);
}
/*
* Freshly booted CPUs call into this:
*/
void __cpuinit check_tsc_sync_target(void)
{
int cpus = 2;
if (unsynchronized_tsc())
return;
/*
* Register this CPU's participation and wait for the
* source CPU to start the measurement:
*/
atomic_inc(&start_count);
while (atomic_read(&start_count) != cpus)
cpu_relax();
check_tsc_warp();
/*
* Ok, we are done:
*/
atomic_inc(&stop_count);
/*
* Wait for the source CPU to print stuff:
*/
while (atomic_read(&stop_count) != cpus)
cpu_relax();
}
#undef NR_LOOPS


@@ -21,6 +21,13 @@
 #
 # $Id: Makefile,v 1.31 2002/03/22 15:56:07 ak Exp $
+# Fill in SRCARCH
+SRCARCH := x86
+archprepare:
+	@mkdir -p ${objtree}/arch/x86/kernel
 LDFLAGS := -m elf_x86_64
 OBJCOPYFLAGS := -O binary -R .note -R .comment -S
 LDFLAGS_vmlinux :=
@@ -71,10 +78,10 @@ CFLAGS += $(cflags-y)
 CFLAGS_KERNEL += $(cflags-kernel-y)
 AFLAGS += -m64
-head-y := arch/x86_64/kernel/head_64.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task_64.o
+head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o
 libs-y += arch/x86/lib/
-core-y += arch/x86_64/kernel/ \
+core-y += arch/x86/kernel/ \
 	arch/x86/mm/ \
 	arch/x86/crypto/ \
 	arch/x86/vdso/


@@ -1,5 +0,0 @@
-ifeq ($(CONFIG_X86_32),y)
-include ${srctree}/arch/x86/kernel/Makefile_32
-else
-include ${srctree}/arch/x86_64/kernel/Makefile_64
-endif


@@ -1,5 +0,0 @@
-#ifdef CONFIG_X86_32
-# include "asm-offsets_32.c"
-#else
-# include "asm-offsets_64.c"
-#endif


@@ -1,259 +0,0 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/screen_info.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
#include <xen/hvc-console.h>
/* Simple VGA output */
#ifdef __i386__
#include <asm/setup.h>
#else
#include <asm/bootsetup.h>
#endif
#define VGABASE (__ISA_IO_base + 0xb8000)
static int max_ypos = 25, max_xpos = 80;
static int current_ypos = 25, current_xpos = 0;
static void early_vga_write(struct console *con, const char *str, unsigned n)
{
char c;
int i, k, j;
while ((c = *str++) != '\0' && n-- > 0) {
if (current_ypos >= max_ypos) {
/* scroll 1 line up */
for (k = 1, j = 0; k < max_ypos; k++, j++) {
for (i = 0; i < max_xpos; i++) {
writew(readw(VGABASE+2*(max_xpos*k+i)),
VGABASE + 2*(max_xpos*j + i));
}
}
for (i = 0; i < max_xpos; i++)
writew(0x720, VGABASE + 2*(max_xpos*j + i));
current_ypos = max_ypos-1;
}
if (c == '\n') {
current_xpos = 0;
current_ypos++;
} else if (c != '\r') {
writew(((0x7 << 8) | (unsigned short) c),
VGABASE + 2*(max_xpos*current_ypos +
current_xpos++));
if (current_xpos >= max_xpos) {
current_xpos = 0;
current_ypos++;
}
}
}
}
static struct console early_vga_console = {
.name = "earlyvga",
.write = early_vga_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
static int early_serial_base = 0x3f8; /* ttyS0 */
#define XMTRDY 0x20
#define DLAB 0x80
#define TXR 0 /* Transmit register (WRITE) */
#define RXR 0 /* Receive register (READ) */
#define IER 1 /* Interrupt Enable */
#define IIR 2 /* Interrupt ID */
#define FCR 2 /* FIFO control */
#define LCR 3 /* Line control */
#define MCR 4 /* Modem control */
#define LSR 5 /* Line Status */
#define MSR 6 /* Modem Status */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
static int early_serial_putc(unsigned char ch)
{
unsigned timeout = 0xffff;
while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
outb(ch, early_serial_base + TXR);
return timeout ? 0 : -1;
}
static void early_serial_write(struct console *con, const char *s, unsigned n)
{
while (*s && n-- > 0) {
if (*s == '\n')
early_serial_putc('\r');
early_serial_putc(*s);
s++;
}
}
#define DEFAULT_BAUD 9600
static __init void early_serial_init(char *s)
{
unsigned char c;
unsigned divisor;
unsigned baud = DEFAULT_BAUD;
char *e;
if (*s == ',')
++s;
if (*s) {
unsigned port;
if (!strncmp(s,"0x",2)) {
early_serial_base = simple_strtoul(s, &e, 16);
} else {
static int bases[] = { 0x3f8, 0x2f8 };
if (!strncmp(s,"ttyS",4))
s += 4;
port = simple_strtoul(s, &e, 10);
if (port > 1 || s == e)
port = 0;
early_serial_base = bases[port];
}
s += strcspn(s, ",");
if (*s == ',')
s++;
}
outb(0x3, early_serial_base + LCR); /* 8n1 */
outb(0, early_serial_base + IER); /* no interrupt */
outb(0, early_serial_base + FCR); /* no fifo */
outb(0x3, early_serial_base + MCR); /* DTR + RTS */
if (*s) {
baud = simple_strtoul(s, &e, 0);
if (baud == 0 || s == e)
baud = DEFAULT_BAUD;
}
divisor = 115200 / baud;
c = inb(early_serial_base + LCR);
outb(c | DLAB, early_serial_base + LCR);
outb(divisor & 0xff, early_serial_base + DLL);
outb((divisor >> 8) & 0xff, early_serial_base + DLH);
outb(c & ~DLAB, early_serial_base + LCR);
}
static struct console early_serial_console = {
.name = "earlyser",
.write = early_serial_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/* Console interface to a host file on AMD's SimNow! */
static int simnow_fd;
enum {
MAGIC1 = 0xBACCD00A,
MAGIC2 = 0xCA110000,
XOPEN = 5,
XWRITE = 4,
};
static noinline long simnow(long cmd, long a, long b, long c)
{
long ret;
asm volatile("cpuid" :
"=a" (ret) :
"b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
return ret;
}
static void __init simnow_init(char *str)
{
char *fn = "klog";
if (*str == '=')
fn = ++str;
/* error ignored */
simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
}
static void simnow_write(struct console *con, const char *s, unsigned n)
{
simnow(XWRITE, simnow_fd, (unsigned long)s, n);
}
static struct console simnow_console = {
.name = "simnow",
.write = simnow_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/* Direct interface for emergencies */
struct console *early_console = &early_vga_console;
static int early_console_initialized = 0;
void early_printk(const char *fmt, ...)
{
char buf[512];
int n;
va_list ap;
va_start(ap,fmt);
n = vscnprintf(buf,512,fmt,ap);
early_console->write(early_console,buf,n);
va_end(ap);
}
static int __initdata keep_early;
static int __init setup_early_printk(char *buf)
{
if (!buf)
return 0;
if (early_console_initialized)
return 0;
early_console_initialized = 1;
if (strstr(buf, "keep"))
keep_early = 1;
if (!strncmp(buf, "serial", 6)) {
early_serial_init(buf + 6);
early_console = &early_serial_console;
} else if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf);
early_console = &early_serial_console;
} else if (!strncmp(buf, "vga", 3)
&& SCREEN_INFO.orig_video_isVGA == 1) {
max_xpos = SCREEN_INFO.orig_video_cols;
max_ypos = SCREEN_INFO.orig_video_lines;
current_ypos = SCREEN_INFO.orig_y;
early_console = &early_vga_console;
} else if (!strncmp(buf, "simnow", 6)) {
simnow_init(buf + 6);
early_console = &simnow_console;
keep_early = 1;
#ifdef CONFIG_HVC_XEN
} else if (!strncmp(buf, "xen", 3)) {
early_console = &xenboot_console;
#endif
}
if (keep_early)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
return 0;
}
early_param("earlyprintk", setup_early_printk);


@@ -1,187 +0,0 @@
/*
* arch/x86_64/kernel/tsc_sync.c: check TSC synchronization.
*
* Copyright (C) 2006, Red Hat, Inc., Ingo Molnar
*
* We check whether all boot CPUs have their TSC's synchronized,
* print a warning if not and turn off the TSC clock-source.
*
* The warp-check is point-to-point between two CPUs, the CPU
* initiating the bootup is the 'source CPU', the freshly booting
* CPU is the 'target CPU'.
*
* Only two CPUs may participate - they can enter in any order.
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
/*
* Entry/exit counters that make sure that both CPUs
* run the measurement code at once:
*/
static __cpuinitdata atomic_t start_count;
static __cpuinitdata atomic_t stop_count;
/*
* We use a raw spinlock in this exceptional case, because
* we want to have the fastest, inlined, non-debug version
* of a critical section, to be able to prove TSC time-warps:
*/
static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED;
static __cpuinitdata cycles_t last_tsc;
static __cpuinitdata cycles_t max_warp;
static __cpuinitdata int nr_warps;
/*
* TSC-warp measurement loop running on both CPUs:
*/
static __cpuinit void check_tsc_warp(void)
{
cycles_t start, now, prev, end;
int i;
start = get_cycles_sync();
/*
* The measurement runs for 20 msecs:
*/
end = start + tsc_khz * 20ULL;
now = start;
for (i = 0; ; i++) {
/*
* We take the global lock, measure TSC, save the
* previous TSC that was measured (possibly on
* another CPU) and update the previous TSC timestamp.
*/
__raw_spin_lock(&sync_lock);
prev = last_tsc;
now = get_cycles_sync();
last_tsc = now;
__raw_spin_unlock(&sync_lock);
/*
* Be nice every now and then (and also check whether
* measurement is done [we also insert a 100 million
* loops safety exit, so we dont lock up in case the
* TSC readout is totally broken]):
*/
if (unlikely(!(i & 7))) {
if (now > end || i > 100000000)
break;
cpu_relax();
touch_nmi_watchdog();
}
/*
* Outside the critical section we can now see whether
* we saw a time-warp of the TSC going backwards:
*/
if (unlikely(prev > now)) {
__raw_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
nr_warps++;
__raw_spin_unlock(&sync_lock);
}
}
}
/*
* Source CPU calls into this - it waits for the freshly booted
* target CPU to arrive and then starts the measurement:
*/
void __cpuinit check_tsc_sync_source(int cpu)
{
int cpus = 2;
/*
* No need to check if we already know that the TSC is not
* synchronized:
*/
if (unsynchronized_tsc())
return;
printk(KERN_INFO "checking TSC synchronization [CPU#%d -> CPU#%d]:",
smp_processor_id(), cpu);
/*
* Reset it - in case this is a second bootup:
*/
atomic_set(&stop_count, 0);
/*
* Wait for the target to arrive:
*/
while (atomic_read(&start_count) != cpus-1)
cpu_relax();
/*
* Trigger the target to continue into the measurement too:
*/
atomic_inc(&start_count);
check_tsc_warp();
while (atomic_read(&stop_count) != cpus-1)
cpu_relax();
/*
* Reset it - just in case we boot another CPU later:
*/
atomic_set(&start_count, 0);
if (nr_warps) {
printk("\n");
printk(KERN_WARNING "Measured %Ld cycles TSC warp between CPUs,"
" turning off TSC clock.\n", max_warp);
mark_tsc_unstable("check_tsc_sync_source failed");
nr_warps = 0;
max_warp = 0;
last_tsc = 0;
} else {
printk(" passed.\n");
}
/*
* Let the target continue with the bootup:
*/
atomic_inc(&stop_count);
}
/*
* Freshly booted CPUs call into this:
*/
void __cpuinit check_tsc_sync_target(void)
{
int cpus = 2;
if (unsynchronized_tsc())
return;
/*
* Register this CPU's participation and wait for the
* source CPU to start the measurement:
*/
atomic_inc(&start_count);
while (atomic_read(&start_count) != cpus)
cpu_relax();
check_tsc_warp();
/*
* Ok, we are done:
*/
atomic_inc(&stop_count);
/*
* Wait for the source CPU to print stuff:
*/
while (atomic_read(&stop_count) != cpus)
cpu_relax();
}
#undef NR_LOOPS


@@ -1,5 +0,0 @@
-#ifdef CONFIG_X86_32
-# include "vmlinux_32.lds.S"
-#else
-# include "vmlinux_64.lds.S"
-#endif