Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86

* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (114 commits)
  x86: delete vsyscall files during make clean
  kbuild: fix typo SRCARCH in find_sources
  x86: fix kernel rebuild due to vsyscall fallout
  .gitignore update for x86 arch
  x86: unify include/asm/debugreg_32/64.h
  x86: unify include/asm/unwind_32/64.h
  x86: unify include/asm/types_32/64.h
  x86: unify include/asm/tlb_32/64.h
  x86: unify include/asm/siginfo_32/64.h
  x86: unify include/asm/bug_32/64.h
  x86: unify include/asm/mman_32/64.h
  x86: unify include/asm/agp_32/64.h
  x86: unify include/asm/kdebug_32/64.h
  x86: unify include/asm/ioctls_32/64.h
  x86: unify include/asm/floppy_32/64.h
  x86: apply missing DMA/OOM prevention to floppy_32.h
  x86: unify include/asm/cache_32/64.h
  x86: unify include/asm/dmi_32/64.h
  x86: unify include/asm/delay_32/64.h
  ...
Linus Torvalds 2007-10-17 13:13:16 -07:00
commit d20ead9e86
252 changed files with 2686 additions and 4292 deletions

.gitignore

@ -26,6 +26,7 @@ vmlinux*
!vmlinux.lds.S
System.map
Module.symvers
!.gitignore
#
# Generated include files


@ -347,7 +347,35 @@ connects the CPUs in an SMP system. This means that an error has been detected;
the IO-APIC automatically retries the transmission, so it should not be a big
problem, but you should read the SMP-FAQ.
In this context it could be interesting to note the new irq directory in 2.4.
In 2.6.2* /proc/interrupts was expanded again. This time the goal was for
/proc/interrupts to display every IRQ vector in use by the system, not
just those considered 'most important'. The new vectors are:
THR -- interrupt raised when a machine check threshold counter
(typically counting ECC corrected errors of memory or cache) exceeds
a configurable threshold. Only available on some systems.
TRM -- a thermal event interrupt occurs when a temperature threshold
has been exceeded for the CPU. This interrupt may also be generated
when the temperature drops back to normal.
SPU -- a spurious interrupt is some interrupt that was raised then lowered
by some IO device before it could be fully processed by the APIC. Hence
the APIC sees the interrupt but does not know what device it came from.
In this case the APIC will generate the interrupt with an IRQ vector
of 0xff. This might also be generated by chipset bugs.
RES, CAL, TLB -- rescheduling, call and TLB flush interrupts are
sent from one CPU to another per the needs of the OS. Typically,
their statistics are used by kernel developers and interested users to
determine the occurrence of interrupts of the given type.
The above IRQ vectors are displayed only when relevant. For example,
the threshold vector does not exist on x86_64 platforms. Others are
suppressed when the system is a uniprocessor. As of this writing, only
i386 and x86_64 platforms support the new IRQ vector displays.
Of some interest is the introduction of the /proc/irq directory to 2.4.
It could be used to set IRQ-to-CPU affinity; this means that you can "hook" an
IRQ to only one CPU, or exclude a CPU from handling IRQs. The contents of the
irq subdir are one subdir for each IRQ, and one file; prof_cpu_mask
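
As a rough illustration of the expanded format this change documents, the
tail of /proc/interrupts on a two-CPU box might look like the following
(counts invented for the example; the row labels match the show_interrupts()
changes further down in this merge):

               CPU0       CPU1
    NMI:          0          0   Non-maskable interrupts
    LOC:   13782842   13782811   Local timer interrupts
    RES:     102350      97421   Rescheduling interrupts
    CAL:       1242       1431   function call interrupts
    TLB:      14425      14998   TLB shootdowns
    TRM:          0          0   Thermal event interrupts
    SPU:          0          0   Spurious interrupts
    ERR:          0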


@ -994,6 +994,8 @@ and is between 256 and 4096 characters. It is defined in the file
mce [X86-32] Machine Check Exception
mce=option [X86-64] See Documentation/x86_64/boot-options.txt
md= [HW] RAID subsystems devices and level
See Documentation/md.txt.


@ -1325,8 +1325,8 @@ ALLSOURCE_ARCHS := $(ARCH) $(SRCARCH)
endif
define find-sources
( for ARCH in $(ALLSOURCE_ARCHS) ; do \
find $(__srctree)arch/$${SRCARCH} $(RCS_FIND_IGNORE) \
( for arch in $(ALLSOURCE_ARCHS) ; do \
find $(__srctree)arch/$${arch} $(RCS_FIND_IGNORE) \
-name $1 -print; \
done ; \
find $(__srctree)security/selinux/include $(RCS_FIND_IGNORE) \
@ -1334,8 +1334,8 @@ define find-sources
find $(__srctree)include $(RCS_FIND_IGNORE) \
\( -name config -o -name 'asm-*' \) -prune \
-o -name $1 -print; \
for ARCH in $(ALLINCLUDE_ARCHS) ; do \
find $(__srctree)include/asm-$${SRCARCH} $(RCS_FIND_IGNORE) \
for arch in $(ALLINCLUDE_ARCHS) ; do \
find $(__srctree)include/asm-$${arch} $(RCS_FIND_IGNORE) \
-name $1 -print; \
done ; \
find $(__srctree)include/asm-generic $(RCS_FIND_IGNORE) \

arch/i386/.gitignore (new file)

@ -0,0 +1 @@
boot


@ -146,6 +146,7 @@ config X86_ELAN
config X86_VOYAGER
bool "Voyager (NCR)"
select SMP if !BROKEN
help
Voyager is an MCA-based 32-way capable SMP architecture proprietary
to NCR Corp. Machine classes 345x/35xx/4100/51xx are Voyager-based.


@ -109,16 +109,42 @@ config MCORE2
help
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and 53xx)
CPUs. You can distinguish newer from older Xeons by the CPU family
in /proc/cpuinfo. Newer ones have 6.
in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo)
config MPENTIUM4
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
help
Select this for Intel Pentium 4 chips. This includes the
Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
(not Pentium M) chips. This option enables compile flags
optimized for the chip, uses the correct cache shift, and
applies any applicable Pentium III optimizations.
Pentium 4, Pentium D, P4-based Celeron and Xeon, and
Pentium-4 M (not Pentium M) chips. This option enables compile
flags optimized for the chip, uses the correct cache line size, and
applies any applicable optimizations.
CPUIDs: F[0-6][1-A] (in /proc/cpuinfo show = cpu family : 15 )
Select this for:
Pentiums (Pentium 4, Pentium D, Celeron, Celeron D) corename:
-Willamette
-Northwood
-Mobile Pentium 4
-Mobile Pentium 4 M
-Extreme Edition (Gallatin)
-Prescott
-Prescott 2M
-Cedar Mill
-Presler
-Smithfield
Xeons (Intel Xeon, Xeon MP, Xeon LV, Xeon MV) corename:
-Foster
-Prestonia
-Gallatin
-Nocona
-Irwindale
-Cranford
-Potomac
-Paxville
-Dempsey
config MK6
bool "K6/K6-II/K6-III"


@ -18,18 +18,35 @@ $(obj)/syscall32_syscall.o: \
$(foreach F,sysenter syscall,$(obj)/vsyscall-$F.so)
# Teach kbuild about targets
targets := $(foreach F,sysenter syscall,vsyscall-$F.o vsyscall-$F.so)
targets := $(foreach F,$(addprefix vsyscall-,sysenter syscall),\
$F.o $F.so $F.so.dbg)
# The DSO images are built using a special linker script
quiet_cmd_syscall = SYSCALL $@
cmd_syscall = $(CC) -m32 -nostdlib -shared -s \
cmd_syscall = $(CC) -m32 -nostdlib -shared \
$(call ld-option, -Wl$(comma)--hash-style=sysv) \
-Wl,-soname=linux-gate.so.1 -o $@ \
-Wl,-T,$(filter-out FORCE,$^)
$(obj)/vsyscall-sysenter.so $(obj)/vsyscall-syscall.so: \
$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
$(obj)/vsyscall-sysenter.so.dbg $(obj)/vsyscall-syscall.so.dbg: \
$(obj)/vsyscall-%.so.dbg: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
$(call if_changed,syscall)
AFLAGS_vsyscall-sysenter.o = -m32 -Wa,-32
AFLAGS_vsyscall-syscall.o = -m32 -Wa,-32
vdsos := vdso32-sysenter.so vdso32-syscall.so
quiet_cmd_vdso_install = INSTALL $@
cmd_vdso_install = cp $(@:vdso32-%.so=$(obj)/vsyscall-%.so.dbg) \
$(MODLIB)/vdso/$@
$(vdsos):
@mkdir -p $(MODLIB)/vdso
$(call cmd,vdso_install)
vdso_install: $(vdsos)


@ -420,6 +420,8 @@ beyond_if:
(regs)->eflags = 0x200;
(regs)->cs = __USER32_CS;
(regs)->ss = __USER32_DS;
regs->r8 = regs->r9 = regs->r10 = regs->r11 =
regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
set_fs(USER_DS);
if (unlikely(current->ptrace & PT_PTRACED)) {
if (current->ptrace & PT_TRACE_EXEC)


@ -112,11 +112,8 @@ struct elf_prpsinfo
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define __STR(x) #x
#define STR(x) __STR(x)
#define _GET_SEG(x) \
({ __u32 seg; asm("movl %%" STR(x) ",%0" : "=r"(seg)); seg; })
({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
/* Assumes current==process to be dumped */
#define ELF_CORE_COPY_REGS(pr_reg, regs) \

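Aside: the two-level STR()/__STR() pattern being deleted exists so that a
macro argument is expanded before it is stringified, which is exactly what
<linux/stringify.h>'s __stringify() already provides. A minimal sketch, with
FOO as a made-up macro:

    #define __STR(x) #x        /* stringifies literally: __STR(FOO) -> "FOO" */
    #define STR(x)   __STR(x)  /* expands first:         STR(FOO)   -> "bar" */
    #define FOO bar

so "movl %%" __stringify(x) ",%0" pastes the expanded name of x into the
asm template.
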

@ -228,6 +228,8 @@ static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
return ret;
}
#define COMPAT_GDT_ENTRY_TLS_MIN 6
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
struct task_struct *child;
@ -246,8 +248,6 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_SYSCALL:
case PTRACE_OLDSETOPTIONS:
case PTRACE_SETOPTIONS:
case PTRACE_SET_THREAD_AREA:
case PTRACE_GET_THREAD_AREA:
return sys_ptrace(request, pid, addr, data);
default:
@ -271,6 +271,12 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
case PTRACE_SETSIGINFO:
case PTRACE_GETSIGINFO:
return ptrace32_siginfo(request, pid, addr, data);
case PTRACE_SET_THREAD_AREA:
case PTRACE_GET_THREAD_AREA:
return sys_ptrace(request, pid,
addr + GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN,
data);
}
child = ptrace_get_task_struct(pid);
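
The rebase works because both GDT layouts keep the TLS descriptors
contiguous and only the first index differs: 6 under the compat (i386) ABI,
per COMPAT_GDT_ENTRY_TLS_MIN above, versus the native GDT_ENTRY_TLS_MIN.
A sketch of the translation, with hypothetical variable names:

    /* illustration only */
    u32 native_idx = compat_idx + GDT_ENTRY_TLS_MIN - COMPAT_GDT_ENTRY_TLS_MIN;

so a 32-bit tracer's PTRACE_{GET,SET}_THREAD_AREA index lands on the same
descriptor slot the 64-bit kernel actually maintains.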


@ -1 +1,2 @@
vsyscall.lds
vsyscall_32.lds


@ -3,3 +3,7 @@ include ${srctree}/arch/x86/kernel/Makefile_32
else
include ${srctree}/arch/x86/kernel/Makefile_64
endif
# Workaround to delete .lds files with make clean
# The problem is that we do not enter Makefile_32 with make clean.
clean-files := vsyscall*.lds vsyscall*.so


@ -51,7 +51,7 @@ obj-$(CONFIG_SCx200) += scx200_32.o
# We must build both images before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/vsyscall_32.o: $(obj)/vsyscall-int80_32.so $(obj)/vsyscall-sysenter_32.so
targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
targets += $(foreach F,int80 sysenter,vsyscall-$F_32.o vsyscall-$F_32.so)
targets += vsyscall-note_32.o vsyscall_32.lds
# The DSO images are built using a special linker script.


@ -63,11 +63,11 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
/* Use inline assembly to define this because the nops are defined
as inline assembly strings in the include files and we cannot
get them easily into strings. */
asm("\t.data\nintelnops: "
asm("\t.section .rodata, \"a\"\nintelnops: "
GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
extern const unsigned char intelnops[];
static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
NULL,
intelnops,
intelnops + 1,
@ -81,11 +81,11 @@ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
asm("\t.section .rodata, \"a\"\nk8nops: "
K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
extern const unsigned char k8nops[];
static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
NULL,
k8nops,
k8nops + 1,
@ -99,11 +99,11 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
asm("\t.section .rodata, \"a\"\nk7nops: "
K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
extern const unsigned char k7nops[];
static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
NULL,
k7nops,
k7nops + 1,
@ -116,28 +116,49 @@ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
};
#endif
#ifdef P6_NOP1
asm("\t.section .rodata, \"a\"\np6nops: "
P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
P6_NOP7 P6_NOP8);
extern const unsigned char p6nops[];
static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
NULL,
p6nops,
p6nops + 1,
p6nops + 1 + 2,
p6nops + 1 + 2 + 3,
p6nops + 1 + 2 + 3 + 4,
p6nops + 1 + 2 + 3 + 4 + 5,
p6nops + 1 + 2 + 3 + 4 + 5 + 6,
p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef CONFIG_X86_64
extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
static inline const unsigned char*const * find_nop_table(void)
{
return k8_nops;
return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
}
#else /* CONFIG_X86_64 */
static struct nop {
static const struct nop {
int cpuid;
unsigned char **noptable;
const unsigned char *const *noptable;
} noptypes[] = {
{ X86_FEATURE_K8, k8_nops },
{ X86_FEATURE_K7, k7_nops },
{ X86_FEATURE_P4, p6_nops },
{ X86_FEATURE_P3, p6_nops },
{ -1, NULL }
};
static unsigned char** find_nop_table(void)
static const unsigned char*const * find_nop_table(void)
{
unsigned char **noptable = intel_nops;
const unsigned char *const *noptable = intel_nops;
int i;
for (i = 0; noptypes[i].cpuid >= 0; i++) {
@ -154,7 +175,7 @@ static unsigned char** find_nop_table(void)
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void add_nops(void *insns, unsigned int len)
{
unsigned char **noptable = find_nop_table();
const unsigned char *const *noptable = find_nop_table();
while (len > 0) {
unsigned int noplen = len;
@ -415,9 +436,6 @@ void __init alternative_instructions(void)
alternatives_smp_unlock(__smp_locks, __smp_locks_end,
_text, _etext);
}
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
(unsigned long)__smp_locks_end);
} else {
alternatives_smp_module_add(NULL, "core kernel",
__smp_locks, __smp_locks_end,
@ -428,6 +446,11 @@ void __init alternative_instructions(void)
apply_paravirt(__parainstructions, __parainstructions_end);
local_irq_restore(flags);
if (smp_alt_once)
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
(unsigned long)__smp_locks_end);
restart_nmi();
#ifdef CONFIG_X86_MCE
restart_mce();
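
Note on the nop tables above: the cumulative "+ 1 + 2 + ..." offsets encode
the invariant that noptable[n] points at an n-byte nop, because the
length-1..8 sequences are emitted back to back and entry n must skip the
1+2+...+(n-1) bytes before it. A sketch of how a consumer such as add_nops()
uses the table:

    /* illustration: emit one nop of up to ASM_NOP_MAX bytes */
    unsigned int noplen = len;
    if (noplen > ASM_NOP_MAX)
            noplen = ASM_NOP_MAX;
    memcpy(insns, noptable[noplen], noplen);   /* noptable[noplen] is noplen bytes long */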


@ -1277,6 +1277,7 @@ void smp_spurious_interrupt(struct pt_regs *regs)
/* see sw-dev-man vol 3, chapter 7.4.13.5 */
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, "
"should never happen.\n", smp_processor_id());
__get_cpu_var(irq_stat).irq_spurious_count++;
irq_exit();
}


@ -974,15 +974,12 @@ void __init setup_boot_APIC_clock (void)
*/
void __cpuinit check_boot_apic_timer_broadcast(void)
{
struct clock_event_device *levt = &per_cpu(lapic_events, boot_cpu_id);
if (!disable_apic_timer ||
(lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
return;
printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
levt->features |= CLOCK_EVT_FEAT_DUMMY;
local_irq_enable();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE, &boot_cpu_id);
@ -1143,6 +1140,7 @@ asmlinkage void smp_spurious_interrupt(void)
if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
ack_APIC_irq();
add_pda(irq_spurious_count, 1);
irq_exit();
}


@ -4,6 +4,7 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/mach_apic.h>
#include "cpu.h"
@ -45,13 +46,17 @@ static __cpuinit int amd_apic_timer_broken(void)
case CPUID_XFAM_10H:
case CPUID_XFAM_11H:
rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
if (lo & ENABLE_C1E_MASK)
if (lo & ENABLE_C1E_MASK) {
if (smp_processor_id() != boot_cpu_physical_apicid)
printk(KERN_INFO "AMD C1E detected late. "
" Force timer broadcast.\n");
return 1;
break;
default:
/* err on the side of caution */
}
break;
default:
/* err on the side of caution */
return 1;
}
}
return 0;
}
#endif


@ -565,7 +565,7 @@ static unsigned int powernow_get(unsigned int cpu)
}
static int __init acer_cpufreq_pst(struct dmi_system_id *d)
static int __init acer_cpufreq_pst(const struct dmi_system_id *d)
{
printk(KERN_WARNING "%s laptop with broken PST tables in BIOS detected.\n", d->ident);
printk(KERN_WARNING "You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");


@ -8,6 +8,7 @@
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/uaccess.h>
@ -19,8 +20,6 @@
#include <mach_apic.h>
#endif
extern int trap_init_f00f_bug(void);
#ifdef CONFIG_X86_INTEL_USERCOPY
/*
* Alignment at which movsl is preferred for bulk memory copies.
@ -95,6 +94,20 @@ static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
return 1;
}
#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
/*
* Update the IDT descriptor and reload the IDT so that
* it uses the read-only mapped virtual address.
*/
idt_descr.address = fix_to_virt(FIX_F00F_IDT);
load_idt(&idt_descr);
}
#endif
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;


@ -170,15 +170,15 @@ union l3_cache {
unsigned val;
};
static const unsigned short assocs[] = {
static unsigned short assocs[] __cpuinitdata = {
[1] = 1, [2] = 2, [4] = 4, [6] = 8,
[8] = 16, [0xa] = 32, [0xb] = 48,
[0xc] = 64,
[0xf] = 0xffff // ??
};
static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };
static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
@ -493,8 +493,8 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
}
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
static void free_cache_attributes(unsigned int cpu)
@ -794,8 +794,9 @@ static int __cpuinit cache_sysfs_init(void)
register_hotcpu_notifier(&cacheinfo_cpu_notifier);
for_each_online_cpu(i) {
cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
(void *)(long)i);
struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
cache_add_dev(sys_dev);
}
return 0;


@ -61,6 +61,7 @@ fastcall void smp_thermal_interrupt(struct pt_regs *regs)
{
irq_enter();
vendor_thermal_interrupt(regs);
__get_cpu_var(irq_stat).irq_thermal_count++;
irq_exit();
}


@ -152,7 +152,7 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
static struct notifier_block thermal_throttle_cpu_notifier =
static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
.notifier_call = thermal_throttle_cpu_callback,
};


@ -738,13 +738,7 @@ void mtrr_ap_init(void)
*/
void mtrr_save_state(void)
{
int cpu = get_cpu();
if (cpu == 0)
mtrr_save_fixed_ranges(NULL);
else
smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
put_cpu();
smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1);
}
static int __init mtrr_init_finialize(void)


@ -34,7 +34,7 @@ struct wd_ops {
u64 checkbit;
};
static struct wd_ops *wd_ops;
static const struct wd_ops *wd_ops;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
* offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
@ -317,7 +317,7 @@ static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops k7_wd_ops = {
static const struct wd_ops k7_wd_ops = {
.reserve = single_msr_reserve,
.unreserve = single_msr_unreserve,
.setup = setup_k7_watchdog,
@ -380,7 +380,7 @@ static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz);
}
static struct wd_ops p6_wd_ops = {
static const struct wd_ops p6_wd_ops = {
.reserve = single_msr_reserve,
.unreserve = single_msr_unreserve,
.setup = setup_p6_watchdog,
@ -532,7 +532,7 @@ static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops p4_wd_ops = {
static const struct wd_ops p4_wd_ops = {
.reserve = p4_reserve,
.unreserve = p4_unreserve,
.setup = setup_p4_watchdog,
@ -550,6 +550,8 @@ static struct wd_ops p4_wd_ops = {
#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
static struct wd_ops intel_arch_wd_ops;
static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
unsigned int ebx;
@ -591,11 +593,11 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
wd->perfctr_msr = perfctr_msr;
wd->evntsel_msr = evntsel_msr;
wd->cccr_msr = 0; //unused
wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
return 1;
}
static struct wd_ops intel_arch_wd_ops = {
static struct wd_ops intel_arch_wd_ops __read_mostly = {
.reserve = single_msr_reserve,
.unreserve = single_msr_unreserve,
.setup = setup_intel_arch_watchdog,


@ -43,8 +43,6 @@
static struct class *cpuid_class;
#ifdef CONFIG_SMP
struct cpuid_command {
u32 reg;
u32 *data;
@ -62,25 +60,11 @@ static inline void do_cpuid(int cpu, u32 reg, u32 * data)
{
struct cpuid_command cmd;
preempt_disable();
if (cpu == smp_processor_id()) {
cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
} else {
cmd.reg = reg;
cmd.data = data;
cmd.reg = reg;
cmd.data = data;
smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
}
preempt_enable();
smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1);
}
#else /* ! CONFIG_SMP */
static inline void do_cpuid(int cpu, u32 reg, u32 * data)
{
cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
}
#endif /* ! CONFIG_SMP */
static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
{
@ -150,7 +134,7 @@ static const struct file_operations cpuid_fops = {
.open = cpuid_open,
};
static int cpuid_device_create(int i)
static int __cpuinit cpuid_device_create(int i)
{
int err = 0;
struct device *dev;
@ -161,7 +145,9 @@ static int cpuid_device_create(int i)
return err;
}
static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;


@ -988,7 +988,7 @@ child_rip:
movq %rsi, %rdi
call *%rax
# exit
xorl %edi, %edi
mov %eax, %edi
call do_exit
CFI_ENDPROC
ENDPROC(child_rip)


@ -29,8 +29,6 @@ u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
= { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(x86_cpu_to_apicid);
u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
struct genapic __read_mostly *genapic = &apic_flat;
/*


@ -52,7 +52,6 @@ static void flat_init_apic_ldr(void)
num = smp_processor_id();
id = 1UL << num;
x86_cpu_to_log_apicid[num] = id;
apic_write(APIC_DFR, APIC_DFR_FLAT);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(id);


@ -51,6 +51,15 @@
*/
LOW_PAGES = 1<<(32-PAGE_SHIFT_asm)
/*
* To preserve the DMA pool in PAGEALLOC kernels, we'll allocate
* pagetables from above the 16MB DMA limit, so we'll have to set
* up pagetables 16MB more (worst-case):
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
LOW_PAGES = LOW_PAGES + 0x1000000
#endif
#if PTRS_PER_PMD > 1
PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
#else
@ -443,6 +452,7 @@ early_page_fault:
early_fault:
cld
#ifdef CONFIG_PRINTK
pusha
movl $(__KERNEL_DS),%eax
movl %eax,%ds
movl %eax,%es
@ -534,8 +544,15 @@ int_msg:
.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
fault_msg:
.ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n"
.asciz "Stack: %p %p %p %p %p %p %p %p\n"
.ascii \
/* fault info: */ "BUG: Int %d: CR2 %p\n" \
/* pusha regs: */ " EDI %p ESI %p EBP %p ESP %p\n" \
" EBX %p EDX %p ECX %p EAX %p\n" \
/* fault frame: */ " err %p EIP %p CS %p flg %p\n" \
\
"Stack: %p %p %p %p %p %p %p %p\n" \
" %p %p %p %p %p %p %p %p\n" \
" %p %p %p %p %p %p %p %p\n"
#include "../../x86/xen/xen-head.S"


@ -1,4 +1,5 @@
#include <linux/module.h>
#include <asm/semaphore.h>
#include <asm/checksum.h>
#include <asm/desc.h>


@ -349,7 +349,11 @@ static irqreturn_t math_error_irq(int cpl, void *dev_id)
* New motherboards sometimes make IRQ 13 be a PCI interrupt,
* so allow interrupt sharing.
*/
static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL };
static struct irqaction fpu_irq = {
.handler = math_error_irq,
.mask = CPU_MASK_NONE,
.name = "fpu",
};
void __init init_ISA_irqs (void)
{


@ -395,7 +395,11 @@ device_initcall(i8259A_init_sysfs);
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,


@ -1296,6 +1296,11 @@ static void __init setup_IO_APIC_irqs(void)
continue;
}
if (!first_notcon) {
apic_printk(APIC_VERBOSE, " not connected.\n");
first_notcon = 1;
}
entry.trigger = irq_trigger(idx);
entry.polarity = irq_polarity(idx);


@ -875,6 +875,10 @@ static void __init setup_IO_APIC_irqs(void)
apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
continue;
}
if (!first_notcon) {
apic_printk(APIC_VERBOSE, " not connected.\n");
first_notcon = 1;
}
irq = pin_2_irq(idx, apic, pin);
add_pin_to_irq(irq, apic, pin);
@ -885,7 +889,7 @@ static void __init setup_IO_APIC_irqs(void)
}
if (!first_notcon)
apic_printk(APIC_VERBOSE," not connected.\n");
apic_printk(APIC_VERBOSE, " not connected.\n");
}
/*
@ -1845,7 +1849,7 @@ static struct sysdev_class ioapic_sysdev_class = {
static int __init ioapic_init_sysfs(void)
{
struct sys_device * dev;
int i, size, error = 0;
int i, size, error;
error = sysdev_class_register(&ioapic_sysdev_class);
if (error)
@ -1854,12 +1858,11 @@ static int __init ioapic_init_sysfs(void)
for (i = 0; i < nr_ioapics; i++ ) {
size = sizeof(struct sys_device) + nr_ioapic_registers[i]
* sizeof(struct IO_APIC_route_entry);
mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
if (!mp_ioapic_data[i]) {
printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
continue;
}
memset(mp_ioapic_data[i], 0, size);
dev = &mp_ioapic_data[i]->dev;
dev->id = i;
dev->cls = &ioapic_sysdev_class;
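
The kmalloc()+memset() pair above collapses into the equivalent zeroing
allocator:

    p = kzalloc(size, GFP_KERNEL);   /* == kmalloc(size, GFP_KERNEL) + memset(p, 0, size) */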


@ -255,9 +255,17 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
unsigned any_count = 0;
spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
#else
for_each_online_cpu(j)
any_count |= kstat_cpu(j).irqs[i];
#endif
action = irq_desc[i].action;
if (!action)
if (!action && !any_count)
goto skip;
seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
@ -268,10 +276,12 @@ int show_interrupts(struct seq_file *p, void *v)
#endif
seq_printf(p, " %8s", irq_desc[i].chip->name);
seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
skip:
@ -280,14 +290,41 @@ skip:
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
seq_printf(p, " Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).apic_timer_irqs);
seq_putc(p, '\n');
seq_printf(p, " Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_resched_count);
seq_printf(p, " Rescheduling interrupts\n");
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
#endif
seq_printf(p, "TRM: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_thermal_count);
seq_printf(p, " Thermal event interrupts\n");
seq_printf(p, "SPU: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ",
per_cpu(irq_stat,j).irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));


@ -62,9 +62,17 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
unsigned any_count = 0;
spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
any_count = kstat_irqs(i);
#else
for_each_online_cpu(j)
any_count |= kstat_cpu(j).irqs[i];
#endif
action = irq_desc[i].action;
if (!action)
if (!action && !any_count)
goto skip;
seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
@ -76,9 +84,11 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, " %8s", irq_desc[i].chip->name);
seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
if (action) {
seq_printf(p, " %s", action->name);
while ((action = action->next) != NULL)
seq_printf(p, ", %s", action->name);
}
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
@ -86,11 +96,37 @@ skip:
seq_printf(p, "NMI: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
seq_putc(p, '\n');
seq_printf(p, " Non-maskable interrupts\n");
seq_printf(p, "LOC: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
seq_putc(p, '\n');
seq_printf(p, " Local timer interrupts\n");
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
seq_printf(p, " Rescheduling interrupts\n");
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
#endif
seq_printf(p, "TRM: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
seq_printf(p, " Thermal event interrupts\n");
seq_printf(p, "THR: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
seq_printf(p, " Threshold APIC interrupts\n");
seq_printf(p, "SPU: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
}
return 0;


@ -92,13 +92,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
struct mm_struct * old_mm;
int retval = 0;
init_MUTEX(&mm->context.sem);
mutex_init(&mm->context.lock);
mm->context.size = 0;
old_mm = current->mm;
if (old_mm && old_mm->context.size > 0) {
down(&old_mm->context.sem);
mutex_lock(&old_mm->context.lock);
retval = copy_ldt(&mm->context, &old_mm->context);
up(&old_mm->context.sem);
mutex_unlock(&old_mm->context.lock);
}
return retval;
}
@ -130,7 +130,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
down(&mm->context.sem);
mutex_lock(&mm->context.lock);
size = mm->context.size*LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
@ -138,7 +138,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
err = 0;
if (copy_to_user(ptr, mm->context.ldt, size))
err = -EFAULT;
up(&mm->context.sem);
mutex_unlock(&mm->context.lock);
if (err < 0)
goto error_return;
if (size != bytecount) {
@ -194,7 +194,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
goto out;
}
down(&mm->context.sem);
mutex_lock(&mm->context.lock);
if (ldt_info.entry_number >= mm->context.size) {
error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
if (error < 0)
@ -221,7 +221,7 @@ install:
error = 0;
out_unlock:
up(&mm->context.sem);
mutex_unlock(&mm->context.lock);
out:
return error;
}
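
The recurring pattern in this part of the series: a semaphore used purely
for mutual exclusion becomes a struct mutex, which is lighter and gains
lockdep coverage. Assuming the context field is renamed from sem to lock as
above, the mapping is one-to-one:

    struct mutex lock;                  /* was: struct semaphore sem; */
    mutex_init(&mm->context.lock);      /* was: init_MUTEX(&...sem);  */
    mutex_lock(&mm->context.lock);      /* was: down(&...sem);        */
    mutex_unlock(&mm->context.lock);    /* was: up(&...sem);          */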


@ -96,13 +96,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
struct mm_struct * old_mm;
int retval = 0;
init_MUTEX(&mm->context.sem);
mutex_init(&mm->context.lock);
mm->context.size = 0;
old_mm = current->mm;
if (old_mm && old_mm->context.size > 0) {
down(&old_mm->context.sem);
mutex_lock(&old_mm->context.lock);
retval = copy_ldt(&mm->context, &old_mm->context);
up(&old_mm->context.sem);
mutex_unlock(&old_mm->context.lock);
}
return retval;
}
@ -133,7 +133,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
down(&mm->context.sem);
mutex_lock(&mm->context.lock);
size = mm->context.size*LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
@ -141,7 +141,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
err = 0;
if (copy_to_user(ptr, mm->context.ldt, size))
err = -EFAULT;
up(&mm->context.sem);
mutex_unlock(&mm->context.lock);
if (err < 0)
goto error_return;
if (size != bytecount) {
@ -193,7 +193,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
goto out;
}
down(&mm->context.sem);
mutex_lock(&mm->context.lock);
if (ldt_info.entry_number >= (unsigned)mm->context.size) {
error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
if (error < 0)
@ -223,7 +223,7 @@ install:
error = 0;
out_unlock:
up(&mm->context.sem);
mutex_unlock(&mm->context.lock);
out:
return error;
}


@ -695,8 +695,6 @@ static int __init mcheck_disable(char *str)
mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
if (*str == '=')
str++;
if (!strcmp(str, "off"))
mce_dont_init = 1;
else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
@ -709,7 +707,7 @@ static int __init mcheck_enable(char *str)
}
__setup("nomce", mcheck_disable);
__setup("mce", mcheck_enable);
__setup("mce=", mcheck_enable);
/*
* Sysfs support


@ -237,6 +237,7 @@ asmlinkage void mce_threshold_interrupt(void)
}
}
out:
add_pda(irq_threshold_count, 1);
irq_exit();
}


@ -26,6 +26,7 @@ asmlinkage void smp_thermal_interrupt(void)
if (therm_throt_process(msr_val & 1))
mce_log_therm_throt_event(smp_processor_id(), msr_val);
add_pda(irq_thermal_count, 1);
irq_exit();
}


@ -133,7 +133,7 @@ static const struct file_operations msr_fops = {
.open = msr_open,
};
static int msr_device_create(int i)
static int __cpuinit msr_device_create(int i)
{
int err = 0;
struct device *dev;
@ -144,7 +144,7 @@ static int msr_device_create(int i)
return err;
}
static int msr_class_cpu_callback(struct notifier_block *nfb,
static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;


@ -222,10 +222,10 @@ static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
return npages;
}
static inline int translate_phb(struct pci_dev* dev)
static inline int translation_enabled(struct iommu_table *tbl)
{
int disabled = bus_info[dev->bus->number].translation_disabled;
return !disabled;
/* only PHBs with translation enabled have an IOMMU table */
return (tbl != NULL);
}
static void iommu_range_reserve(struct iommu_table *tbl,
@ -388,7 +388,7 @@ static void calgary_unmap_sg(struct device *dev,
struct scatterlist *s;
int i;
if (!translate_phb(to_pci_dev(dev)))
if (!translation_enabled(tbl))
return;
for_each_sg(sglist, s, nelems, i) {
@ -428,7 +428,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long entry;
int i;
if (!translate_phb(to_pci_dev(dev)))
if (!translation_enabled(tbl))
return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
for_each_sg(sg, s, nelems, i) {
@ -474,7 +474,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
uaddr = (unsigned long)vaddr;
npages = num_dma_pages(uaddr, size);
if (translate_phb(to_pci_dev(dev)))
if (translation_enabled(tbl))
dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
else
dma_handle = virt_to_bus(vaddr);
@ -488,7 +488,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
struct iommu_table *tbl = find_iommu_table(dev);
unsigned int npages;
if (!translate_phb(to_pci_dev(dev)))
if (!translation_enabled(tbl))
return;
npages = num_dma_pages(dma_handle, size);
@ -513,7 +513,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
goto error;
memset(ret, 0, size);
if (translate_phb(to_pci_dev(dev))) {
if (translation_enabled(tbl)) {
/* set up tces to cover the allocated range */
mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
if (mapping == bad_dma_address)
@ -1194,7 +1194,7 @@ static int __init calgary_init(void)
{
int ret;
struct pci_dev *dev = NULL;
void *tce_space;
struct calgary_bus_info *info;
ret = calgary_locate_bbars();
if (ret)
@ -1206,12 +1206,14 @@ static int __init calgary_init(void)
break;
if (!is_cal_pci_dev(dev->device))
continue;
if (!translate_phb(dev)) {
info = &bus_info[dev->bus->number];
if (info->translation_disabled) {
calgary_init_one_nontraslated(dev);
continue;
}
tce_space = bus_info[dev->bus->number].tce_space;
if (!tce_space && !translate_empty_slots)
if (!info->tce_space && !translate_empty_slots)
continue;
ret = calgary_init_one(dev);
@ -1229,11 +1231,13 @@ error:
break;
if (!is_cal_pci_dev(dev->device))
continue;
if (!translate_phb(dev)) {
info = &bus_info[dev->bus->number];
if (info->translation_disabled) {
pci_dev_put(dev);
continue;
}
if (!bus_info[dev->bus->number].tce_space && !translate_empty_slots)
if (!info->tce_space && !translate_empty_slots)
continue;
calgary_disable_translation(dev);
@ -1546,7 +1550,7 @@ static void __init calgary_fixup_one_tce_space(struct pci_dev *dev)
static int __init calgary_fixup_tce_spaces(void)
{
struct pci_dev *dev = NULL;
void *tce_space;
struct calgary_bus_info *info;
if (no_iommu || swiotlb || !calgary_detected)
return -ENODEV;
@ -1559,11 +1563,12 @@ static int __init calgary_fixup_tce_spaces(void)
break;
if (!is_cal_pci_dev(dev->device))
continue;
if (!translate_phb(dev))
info = &bus_info[dev->bus->number];
if (info->translation_disabled)
continue;
tce_space = bus_info[dev->bus->number].tce_space;
if (!tce_space)
if (!info->tce_space)
continue;
calgary_fixup_one_tce_space(dev);


@ -12,7 +12,6 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/io.h>
struct dma_coherent_mem {


@ -51,11 +51,9 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
struct page *page;
int node;
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
node = pcibus_to_node(to_pci_dev(dev)->bus);
else
#endif
node = dev_to_node(dev);
if (node == -1)
node = numa_node_id();
if (node < first_node(node_online_map))


@ -8,6 +8,7 @@
* See Documentation/DMA-mapping.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU General Public License v2 only.
*/
#include <linux/types.h>
@ -375,7 +376,8 @@ static inline int dma_map_cont(struct scatterlist *start, int nelems,
* DMA map all entries in a scatterlist.
* Merge chunks that have page aligned sizes into a continuous mapping.
*/
int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
int dir)
{
int i;
int out;


@ -165,7 +165,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_
seg &= ~7UL;
down(&child->mm->context.sem);
mutex_lock(&child->mm->context.lock);
if (unlikely((seg >> 3) >= child->mm->context.size))
addr = -1L; /* bogus selector, access would fault */
else {
@ -179,7 +179,7 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_
addr &= 0xffff;
addr += base;
}
up(&child->mm->context.sem);
mutex_unlock(&child->mm->context.lock);
}
return addr;
}


@ -103,7 +103,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r
seg &= ~7UL;
down(&child->mm->context.sem);
mutex_lock(&child->mm->context.lock);
if (unlikely((seg >> 3) >= child->mm->context.size))
addr = -1L; /* bogus selector, access would fault */
else {
@ -117,7 +117,7 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r
addr &= 0xffff;
addr += base;
}
up(&child->mm->context.sem);
mutex_unlock(&child->mm->context.lock);
}
return addr;


@ -604,7 +604,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
level = cpuid_eax(1);
if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
if (c->x86 == 0x10)
if (c->x86 == 0x10 || c->x86 == 0x11)
set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
/* Enable workaround for FXSAVE leak */
@ -968,7 +968,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
* applications want to get the raw CPUID data, they should access
* /dev/cpu/<cpu_nr>/cpuid instead.
*/
static char *x86_cap_flags[] = {
static const char *const x86_cap_flags[] = {
/* Intel-defined */
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@ -1022,7 +1022,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static char *x86_power_flags[] = {
static const char *const x86_power_flags[] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */


@ -385,7 +385,6 @@ static int setup_frame(int sig, struct k_sigaction *ka,
regs->edx = (unsigned long) 0;
regs->ecx = (unsigned long) 0;
set_fs(USER_DS);
regs->xds = __USER_DS;
regs->xes = __USER_DS;
regs->xss = __USER_DS;
@ -479,7 +478,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->edx = (unsigned long) &frame->info;
regs->ecx = (unsigned long) &frame->uc;
set_fs(USER_DS);
regs->xds = __USER_DS;
regs->xes = __USER_DS;
regs->xss = __USER_DS;


@ -342,6 +342,7 @@ fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
smp_mb__after_clear_bit();
out:
put_cpu_no_resched();
__get_cpu_var(irq_stat).irq_tlb_count++;
}
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
@ -640,6 +641,7 @@ static void native_smp_send_stop(void)
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
__get_cpu_var(irq_stat).irq_resched_count++;
}
fastcall void smp_call_function_interrupt(struct pt_regs *regs)
@ -660,6 +662,7 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs)
*/
irq_enter();
(*func)(info);
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
if (wait) {
@ -705,3 +708,10 @@ struct smp_ops smp_ops = {
.smp_send_reschedule = native_smp_send_reschedule,
.smp_call_function_mask = native_smp_call_function_mask,
};
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
void *info, int wait)
{
return smp_ops.smp_call_function_mask(mask, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function_mask);
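
The new export gives modules a stable entry point into the smp_ops hook. A
hypothetical caller (function name and mask handling invented for
illustration; smp_call_function-style interfaces of this era expect the
current CPU to be excluded from the mask):

    static void drain_local(void *info)
    {
            /* runs on every CPU in the mask */
    }

    cpumask_t mask = cpu_online_map;
    cpu_clear(smp_processor_id(), mask);
    smp_call_function_mask(mask, drain_local, NULL, 1 /* wait */);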


@ -163,6 +163,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
out:
ack_APIC_irq();
cpu_clear(cpu, f->flush_cpumask);
add_pda(irq_tlb_count, 1);
}
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
@ -493,6 +494,7 @@ void smp_send_stop(void)
asmlinkage void smp_reschedule_interrupt(void)
{
ack_APIC_irq();
add_pda(irq_resched_count, 1);
}
asmlinkage void smp_call_function_interrupt(void)
@ -514,6 +516,7 @@ asmlinkage void smp_call_function_interrupt(void)
exit_idle();
irq_enter();
(*func)(info);
add_pda(irq_call_count, 1);
irq_exit();
if (wait) {
mb();


@ -102,8 +102,8 @@ u8 apicid_2_node[MAX_APICID];
* Trampoline 80x86 program as an array.
*/
extern unsigned char trampoline_data [];
extern unsigned char trampoline_end [];
extern const unsigned char trampoline_data [];
extern const unsigned char trampoline_end [];
static unsigned char *trampoline_base;
static int trampoline_exec;
@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
* has made sure it's suitably aligned.
*/
static unsigned long __devinit setup_trampoline(void)
static unsigned long __cpuinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
return virt_to_phys(trampoline_base);
@ -1021,6 +1021,12 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
if (!max_cpus) {
smp_found_config = 0;
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
if (nmi_watchdog == NMI_LOCAL_APIC) {
printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
connect_bsp_APIC();
setup_local_APIC();
}
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
cpu_set(0, per_cpu(cpu_sibling_map, 0));


@ -102,8 +102,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
* Trampoline 80x86 program as an array.
*/
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];
extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_end[];
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
@ -695,7 +695,6 @@ do_rest:
cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, cpu_possible_map);
x86_cpu_to_apicid[cpu] = BAD_APICID;
x86_cpu_to_log_apicid[cpu] = BAD_APICID;
return -EIO;
}


@ -33,7 +33,7 @@ static void save_stack_address(void *data, unsigned long addr)
trace->entries[trace->nr_entries++] = addr;
}
static struct stacktrace_ops save_stack_ops = {
static const struct stacktrace_ops save_stack_ops = {
.warning = save_stack_warning,
.warning_symbol = save_stack_warning_symbol,
.stack = save_stack_stack,


@ -40,9 +40,9 @@ static inline void flush_tce(void* tceaddr)
{
/* a single tce can't cross a cache line */
if (cpu_has_clflush)
asm volatile("clflush (%0)" :: "r" (tceaddr));
clflush(tceaddr);
else
asm volatile("wbinvd":::"memory");
wbinvd();
}
void tce_build(struct iommu_table *tbl, unsigned long index,


@ -44,15 +44,15 @@ int arch_register_cpu(int num)
* Also certain PCI quirks require not to enable hotplug control
* for all CPU's.
*/
if (num && enable_cpu_hotplug)
#ifdef CONFIG_HOTPLUG_CPU
if (num)
cpu_devices[num].cpu.hotpluggable = 1;
#endif
return register_cpu(&cpu_devices[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
int enable_cpu_hotplug = 1;
void arch_unregister_cpu(int num) {
return unregister_cpu(&cpu_devices[num].cpu);
}


@ -36,11 +36,11 @@
#include <asm/segment.h>
#include <asm/page.h>
.data
/* We can free up trampoline after bootup if cpu hotplug is not supported. */
#ifndef CONFIG_HOTPLUG_CPU
.section ".init.data","aw",@progbits
#else
.section .rodata,"a",@progbits
#endif
.code16


@ -33,7 +33,12 @@
#include <asm/msr.h>
#include <asm/segment.h>
.data
/* We can free up trampoline after bootup if cpu hotplug is not supported. */
#ifndef CONFIG_HOTPLUG_CPU
.section .init.data, "aw", @progbits
#else
.section .rodata, "a", @progbits
#endif
.code16


@ -112,7 +112,7 @@ struct stack_frame {
static inline unsigned long print_context_stack(struct thread_info *tinfo,
unsigned long *stack, unsigned long ebp,
struct stacktrace_ops *ops, void *data)
const struct stacktrace_ops *ops, void *data)
{
#ifdef CONFIG_FRAME_POINTER
struct stack_frame *frame = (struct stack_frame *)ebp;
@ -149,7 +149,7 @@ static inline unsigned long print_context_stack(struct thread_info *tinfo,
void dump_trace(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack,
struct stacktrace_ops *ops, void *data)
const struct stacktrace_ops *ops, void *data)
{
unsigned long ebp = 0;
@ -221,7 +221,7 @@ static void print_trace_address(void *data, unsigned long addr)
touch_nmi_watchdog();
}
static struct stacktrace_ops print_trace_ops = {
static const struct stacktrace_ops print_trace_ops = {
.warning = print_trace_warning,
.warning_symbol = print_trace_warning_symbol,
.stack = print_trace_stack,
@ -398,31 +398,24 @@ void die(const char * str, struct pt_regs * regs, long err)
local_save_flags(flags);
if (++die.lock_owner_depth < 3) {
int nl = 0;
unsigned long esp;
unsigned short ss;
report_bug(regs->eip, regs);
printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
++die_counter);
#ifdef CONFIG_PREEMPT
printk(KERN_EMERG "PREEMPT ");
nl = 1;
printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
if (!nl)
printk(KERN_EMERG);
printk("SMP ");
nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!nl)
printk(KERN_EMERG);
printk("DEBUG_PAGEALLOC");
nl = 1;
#endif
if (nl)
printk("\n");
printk("\n");
if (notify_die(DIE_OOPS, str, regs, err,
current->thread.trap_no, SIGSEGV) !=
NOTIFY_STOP) {
@ -1112,20 +1105,6 @@ asmlinkage void math_emulate(long arg)
#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
/*
* Update the IDT descriptor and reload the IDT so that
* it uses the read-only mapped virtual address.
*/
idt_descr.address = fix_to_virt(FIX_F00F_IDT);
load_idt(&idt_descr);
}
#endif
/*
* This needs to use 'idt_table' rather than 'idt', and
* thus use the _nonmapped_ version of the IDT, as the


@ -215,7 +215,7 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
unsigned long *stack,
struct stacktrace_ops *ops, void *data)
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
@ -336,7 +336,7 @@ static void print_trace_address(void *data, unsigned long addr)
printk_address(addr);
}
static struct stacktrace_ops print_trace_ops = {
static const struct stacktrace_ops print_trace_ops = {
.warning = print_trace_warning,
.warning_symbol = print_trace_warning_symbol,
.stack = print_trace_stack,


@ -349,10 +349,10 @@ __cpuinit int unsynchronized_tsc(void)
static void __init check_geode_tsc_reliable(void)
{
unsigned long val;
unsigned long res_low, res_high;
rdmsrl(MSR_GEODE_BUSCONT_CONF0, val);
if ((val & RTSC_SUSP))
rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
if (res_low & RTSC_SUSP)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else


@ -78,7 +78,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.sys_tz = sys_tz;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
@ -289,7 +288,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
unsigned long *d;
unsigned long node = 0;
#ifdef CONFIG_NUMA
node = cpu_to_node[cpu];
node = cpu_to_node(cpu);
#endif
if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);


@ -14,7 +14,7 @@ find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len)
/* could test bitsliced, but it's hardly worth it */
end = n+len;
if (end >= nbits)
if (end > nbits)
return -1;
for (i = n+1; i < end; i++) {
if (test_bit(i, bitmap)) {
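
The fix is a boundary off-by-one: end is one past the last bit the string
would occupy, so end == nbits is still in range. With assumed values:

    /* nbits = 64, len = 4, candidate start n = 60 */
    long end = n + len;   /* 64: the string uses bits 60..63, all in range */
    /* old: end >= nbits  rejected this valid placement                    */
    /* new: end >  nbits  rejects only a genuine overrun                   */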


@ -26,27 +26,18 @@ static void __rdmsr_safe_on_cpu(void *info)
static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
{
int err = 0;
preempt_disable();
if (smp_processor_id() == cpu)
if (safe)
err = rdmsr_safe(msr_no, l, h);
else
rdmsr(msr_no, *l, *h);
else {
struct msr_info rv;
struct msr_info rv;
rv.msr_no = msr_no;
if (safe) {
smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
&rv, 0, 1);
err = rv.err;
} else {
smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
}
*l = rv.l;
*h = rv.h;
rv.msr_no = msr_no;
if (safe) {
smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1);
err = rv.err;
} else {
smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1);
}
preempt_enable();
*l = rv.l;
*h = rv.h;
return err;
}
@ -67,27 +58,18 @@ static void __wrmsr_safe_on_cpu(void *info)
static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
{
int err = 0;
preempt_disable();
if (smp_processor_id() == cpu)
if (safe)
err = wrmsr_safe(msr_no, l, h);
else
wrmsr(msr_no, l, h);
else {
struct msr_info rv;
struct msr_info rv;
rv.msr_no = msr_no;
rv.l = l;
rv.h = h;
if (safe) {
smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
&rv, 0, 1);
err = rv.err;
} else {
smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
}
rv.msr_no = msr_no;
rv.l = l;
rv.h = h;
if (safe) {
smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1);
err = rv.err;
} else {
smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1);
}
preempt_enable();
return err;
}


@ -2,7 +2,7 @@
#include <linux/linkage.h>
#include <asm/rwlock.h>
#include <asm/alternative-asm.i>
#include <asm/alternative-asm.h>
#include <asm/dwarf2.h>
/* rdi: pointer to rwlock_t */


@ -15,8 +15,8 @@
#include <linux/linkage.h>
#include <asm/rwlock.h>
#include <asm/alternative-asm.i>
#include <asm/frame.i>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#include <asm/dwarf2.h>
/*


@ -160,26 +160,6 @@ char *strchr(const char * s, int c)
EXPORT_SYMBOL(strchr);
#endif
#ifdef __HAVE_ARCH_STRRCHR
char *strrchr(const char * s, int c)
{
int d0, d1;
char * res;
asm volatile( "movb %%al,%%ah\n"
"1:\tlodsb\n\t"
"cmpb %%ah,%%al\n\t"
"jne 2f\n\t"
"leal -1(%%esi),%0\n"
"2:\ttestb %%al,%%al\n\t"
"jne 1b"
:"=g" (res), "=&S" (d0), "=&a" (d1)
:"0" (0),"1" (s),"2" (c)
:"memory");
return res;
}
EXPORT_SYMBOL(strrchr);
#endif
#ifdef __HAVE_ARCH_STRLEN
size_t strlen(const char * s)
{


@ -35,7 +35,11 @@ void __init pre_intr_init_hook(void)
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
/**
* intr_init_hook - post gate setup interrupt initialisation


@ -46,11 +46,11 @@
* ES7000 Globals
*/
volatile unsigned long *psai = NULL;
struct mip_reg *mip_reg;
struct mip_reg *host_reg;
int mip_port;
unsigned long mip_addr, host_addr;
static volatile unsigned long *psai = NULL;
static struct mip_reg *mip_reg;
static struct mip_reg *host_reg;
static int mip_port;
static unsigned long mip_addr, host_addr;
/*
* GSI override for ES7000 platforms.
@ -288,28 +288,8 @@ es7000_start_cpu(int cpu, unsigned long eip)
}
int
es7000_stop_cpu(int cpu)
{
int startup;
if (psai == NULL)
return -1;
startup= (0x1000000 | cpu);
while ((*psai & 0xff00ffff) != startup)
;
startup = (*psai & 0xff0000) >> 16;
*psai &= 0xffffff;
return 0;
}
void __init
es7000_sw_apic()
es7000_sw_apic(void)
{
if (es7000_plat) {
int mip_status;


@ -22,7 +22,7 @@ extern struct genapic apic_default;
struct genapic *genapic = &apic_default;
struct genapic *apic_probe[] __initdata = {
static struct genapic *apic_probe[] __initdata = {
&apic_summit,
&apic_bigsmp,
&apic_es7000,


@ -18,7 +18,11 @@ void __init pre_intr_init_hook(void)
/*
* IRQ2 is cascade interrupt to second interrupt controller
*/
static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
static struct irqaction irq2 = {
.handler = no_action,
.mask = CPU_MASK_NONE,
.name = "cascade",
};
void __init intr_init_hook(void)
{


@ -442,8 +442,8 @@ static __u32 __init
setup_trampoline(void)
{
/* these two are global symbols in trampoline.S */
extern __u8 trampoline_end[];
extern __u8 trampoline_data[];
extern const __u8 trampoline_end[];
extern const __u8 trampoline_data[];
memcpy((__u8 *)trampoline_base, trampoline_data,
trampoline_end - trampoline_data);
@ -1037,6 +1037,7 @@ smp_call_function_interrupt(void)
*/
irq_enter();
(*func)(info);
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
if (wait) {
mb();


@ -103,14 +103,14 @@ extern unsigned long highend_pfn, highstart_pfn;
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
unsigned long node_remap_start_pfn[MAX_NUMNODES];
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
unsigned long node_remap_size[MAX_NUMNODES];
unsigned long node_remap_offset[MAX_NUMNODES];
void *node_remap_start_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
void *node_remap_end_vaddr[MAX_NUMNODES];
void *node_remap_alloc_vaddr[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long kva_start_pfn;
static unsigned long kva_pages;
/*

View file

@ -105,7 +105,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
LDT and other horrors are only used in user space. */
if (seg & (1<<2)) {
/* Must lock the LDT while reading it. */
down(&current->mm->context.sem);
mutex_lock(&current->mm->context.lock);
desc = current->mm->context.ldt;
desc = (void *)desc + (seg & ~7);
} else {
@ -118,7 +118,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
base = get_desc_base((unsigned long *)desc);
if (seg & (1<<2)) {
up(&current->mm->context.sem);
mutex_unlock(&current->mm->context.lock);
} else
put_cpu();
@ -539,23 +539,22 @@ no_context:
printk(KERN_ALERT "BUG: unable to handle kernel paging"
" request");
printk(" at virtual address %08lx\n",address);
printk(KERN_ALERT " printing eip:\n");
printk("%08lx\n", regs->eip);
printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
page = read_cr3();
page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
printk(KERN_ALERT "*pdpt = %016Lx\n", page);
printk("*pdpt = %016Lx ", page);
if ((page >> PAGE_SHIFT) < max_low_pfn
&& page & _PAGE_PRESENT) {
page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
& (PTRS_PER_PMD - 1)];
printk(KERN_ALERT "*pde = %016Lx\n", page);
printk(KERN_ALERT "*pde = %016Lx ", page);
page &= ~_PAGE_NX;
}
#else
printk(KERN_ALERT "*pde = %08lx\n", page);
printk("*pde = %08lx ", page);
#endif
/*
@ -569,8 +568,10 @@ no_context:
page &= PAGE_MASK;
page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
& (PTRS_PER_PTE - 1)];
printk(KERN_ALERT "*pte = %0*Lx\n", sizeof(page)*2, (u64)page);
printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
}
printk("\n");
}
tsk->thread.cr2 = address;
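The first two hunks above convert the LDT lock from a semaphore used as a mutex (down()/up()) to a real mutex (mutex_lock()/mutex_unlock()), which is covered by the kernel's mutex debugging. A minimal sketch of the pattern; ldt_lock and read_ldt_entry are hypothetical names:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(ldt_lock);

	static void read_ldt_entry(void)
	{
		mutex_lock(&ldt_lock);		/* was: down(&...sem) */
		/* ... walk the LDT while it cannot change ... */
		mutex_unlock(&ldt_lock);	/* was: up(&...sem) */
	}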

View file

@ -85,13 +85,20 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
pte_t *page_table = NULL;
#ifdef CONFIG_DEBUG_PAGEALLOC
page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
if (!page_table)
page_table =
(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
BUG_ON(page_table != pte_offset_kernel(pmd, 0));
}
return pte_offset_kernel(pmd, 0);
}

View file

@ -166,7 +166,7 @@ early_node_mem(int nodeid, unsigned long start, unsigned long end,
return __va(mem);
ptr = __alloc_bootmem_nopanic(size,
SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
if (ptr == 0) {
if (ptr == NULL) {
printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
size, nodeid);
return NULL;
@ -261,7 +261,7 @@ void __init numa_init_array(void)
We round robin the existing nodes. */
rr = first_node(node_online_map);
for (i = 0; i < NR_CPUS; i++) {
if (cpu_to_node[i] != NUMA_NO_NODE)
if (cpu_to_node(i) != NUMA_NO_NODE)
continue;
numa_set_node(i, rr);
rr = next_node(rr, node_online_map);
@ -543,7 +543,7 @@ __cpuinit void numa_add_cpu(int cpu)
void __cpuinit numa_set_node(int cpu, int node)
{
cpu_pda(cpu)->nodenumber = node;
cpu_to_node[cpu] = node;
cpu_to_node(cpu) = node;
}
unsigned long __init numa_free_all_bootmem(void)
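The numa_64.c hunks switch cpu_to_node from direct array indexing to a cpu_to_node() accessor (the srat_64.c hunk below does the same), so callers no longer depend on how the mapping is stored. A hedged sketch; the names are illustrative, not the kernel's exact ones. Note that as a macro expanding to an array element it can still appear on the left-hand side, as numa_set_node() above does:

	static u8 cpu_node_map[NR_CPUS];

	#define cpu_to_node(cpu)	(cpu_node_map[(cpu)])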

View file

@ -70,10 +70,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
static void cache_flush_page(struct page *p)
{
unsigned long adr = (unsigned long)page_address(p);
void *adr = page_address(p);
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
asm volatile("clflush (%0)" :: "r" (adr + i));
clflush(adr+i);
}
static void flush_kernel_map(void *arg)

View file

@ -65,7 +65,7 @@ static void cache_flush_page(void *adr)
{
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
asm volatile("clflush (%0)" :: "r" (adr + i));
clflush(adr+i);
}
static void flush_kernel_map(void *arg)
@ -148,6 +148,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
split = split_large_page(address, prot, ref_prot2);
if (!split)
return -ENOMEM;
pgprot_val(ref_prot2) &= ~_PAGE_NX;
set_pte(kpte, mk_pte(split, ref_prot2));
kpte_page = split;
}
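Both pageattr files now call a clflush() helper instead of open-coding the instruction. A hedged sketch of what such a wrapper looks like; the kernel's real helper may differ in constraints and placement:

	static inline void clflush(volatile void *p)
	{
		/* "+m" marks the cache line as read and written, so the
		 * compiler cannot cache or reorder the access away. */
		asm volatile("clflush %0" : "+m" (*(volatile char *)p));
	}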

View file

@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
@ -39,6 +40,8 @@ void show_mem(void)
for_each_online_pgdat(pgdat) {
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
touch_nmi_watchdog();
page = pgdat_page_nr(pgdat, i);
total++;
if (PageHighMem(page))
@ -97,8 +100,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
}
pte = pte_offset_kernel(pmd, vaddr);
if (pgprot_val(flags))
/* <pfn,flags> stored as-is, to permit clearing entries */
set_pte(pte, pfn_pte(pfn, flags));
set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
else
pte_clear(&init_mm, vaddr, pte);

View file

@ -431,9 +431,9 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
setup_node_bootmem(i, nodes[i].start, nodes[i].end);
for (i = 0; i < NR_CPUS; i++) {
if (cpu_to_node[i] == NUMA_NO_NODE)
if (cpu_to_node(i) == NUMA_NO_NODE)
continue;
if (!node_isset(cpu_to_node[i], node_possible_map))
if (!node_isset(cpu_to_node(i), node_possible_map))
numa_set_node(i, NUMA_NO_NODE);
}
numa_init_array();

View file

@ -269,7 +269,6 @@ static void nmi_cpu_shutdown(void * dummy)
apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
apic_write(APIC_LVTERR, v);
nmi_restore_registers(msrs);
model->shutdown(msrs);
}
@ -278,6 +277,7 @@ static void nmi_shutdown(void)
nmi_enabled = 0;
on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
unregister_die_notifier(&profile_exceptions_nb);
model->shutdown(cpu_msrs);
free_msrs();
}

View file

@ -289,6 +289,22 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL385 G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL585 G2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
},
},
#ifdef __i386__
{
.callback = assign_all_busses,
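The added entries extend a DMI quirk table: an entry applies only when every DMI_MATCH() field equals the firmware-reported string, and the table is consumed by dmi_check_system(), which invokes .callback for each matching entry (stopping early if a callback returns non-zero). A hedged sketch of the consuming side; pci_sort_init is a hypothetical wrapper:

	static int __init pci_sort_init(void)
	{
		/* Runs set_bf_sort() for every entry whose DMI_MATCH()
		 * fields all match this machine. */
		dmi_check_system(pciprobe_dmi_table);
		return 0;
	}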

View file

@ -13,7 +13,7 @@ vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
$(obj)/vdso.o: $(obj)/vdso.so
targets += vdso.so vdso.lds $(vobjs-y) vdso-syms.o
targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) vdso-syms.o
# The DSO images are built using a special linker script.
quiet_cmd_syscall = SYSCALL $@
@ -26,12 +26,19 @@ vdso-flags = -fPIC -shared -Wl,-soname=linux-vdso.so.1 \
$(call ld-option, -Wl$(comma)--hash-style=sysv) \
-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
SYSCFLAGS_vdso.so = $(vdso-flags)
SYSCFLAGS_vdso.so.dbg = $(vdso-flags)
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
$(obj)/vdso.so: $(src)/vdso.lds $(vobjs) FORCE
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,syscall)
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
CFL := $(PROFILING) -mcmodel=small -fPIC -g0 -O2 -fasynchronous-unwind-tables -m64
$(obj)/vclock_gettime.o: KBUILD_CFLAGS = $(CFL)
@ -47,3 +54,11 @@ $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
SYSCFLAGS_vdso-syms.o = -r -d
$(obj)/vdso-syms.o: $(src)/vdso.lds $(vobjs) FORCE
$(call if_changed,syscall)
quiet_cmd_vdso_install = INSTALL $@
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
vdso.so:
@mkdir -p $(MODLIB)/vdso
$(call cmd,vdso_install)
vdso_install: vdso.so

View file

@ -26,13 +26,16 @@ SECTIONS
is insufficient, ld -shared will barf. Just increase it here. */
. = VDSO_PRELINK + VDSO_TEXT_OFFSET;
.text : { *(.text) } :text
.text.ptr : { *(.text.ptr) } :text
. = VDSO_PRELINK + 0x900;
.data : { *(.data) } :text
.bss : { *(.bss) } :text
.text : { *(.text*) } :text
.rodata : { *(.rodata*) } :text
.data : {
*(.data*)
*(.sdata*)
*(.bss*)
*(.dynbss*)
} :text
.altinstructions : { *(.altinstructions) } :text
.altinstr_replacement : { *(.altinstr_replacement) } :text
.note : { *(.note.*) } :text :note
@ -42,7 +45,6 @@ SECTIONS
.useless : {
*(.got.plt) *(.got)
*(.gnu.linkonce.d.*)
*(.dynbss)
*(.gnu.linkonce.b.*)
} :text
}

View file

@ -8,5 +8,5 @@
#include <asm/timex.h>
#include <asm/vgtod.h>
#define VEXTERN(x) typeof (__ ## x) *vdso_ ## x = (void *)VMAGIC;
#define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC;
#include "vextern.h"

View file

@ -356,6 +356,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
*/
irq_enter();
(*func)(info);
__get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
if (wait) {
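This hunk and the voyager one earlier add the same statistic: each call-function IPI bumps irq_call_count in the per-CPU irq_stat, the figure behind the CAL line in /proc/interrupts. The idiom is a lock-free per-CPU counter, safe because interrupt context keeps execution on one CPU. A minimal sketch with a hypothetical variable:

	#include <linux/percpu.h>

	struct call_stats { unsigned int irq_call_count; };
	static DEFINE_PER_CPU(struct call_stats, call_stats);

	static void account_call_ipi(void)
	{
		__get_cpu_var(call_stats).irq_call_count++;
	}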

View file

@ -175,14 +175,12 @@ config MK8
config MPSC
bool "Intel P4 / older Netburst based Xeon"
help
Optimize for Intel Pentium 4 and older Nocona/Dempsey Xeon CPUs
with Intel Extended Memory 64 Technology(EM64T). For details see
<http://www.intel.com/technology/64bitextensions/>.
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
Xeon CPUs with Intel 64-bit extensions, which are compatible with x86-64.
Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
Netburst core and shouldn't use this option. You can distinguish them
using the cpu family field
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one
(this rule only applies to systems that support EM64T)
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
config MCORE2
bool "Intel Core2 / newer Xeon"
@ -190,8 +188,7 @@ config MCORE2
Optimize for Intel Core2 and newer Xeons (51xx)
You can distinguish the newer Xeons from the older ones using
the cpu family field in /proc/cpuinfo. 15 is an older Xeon
(use CONFIG_MPSC then), 6 is a newer one. This rule only
applies to CPUs that support EM64T.
(use CONFIG_MPSC then), 6 is a newer one.
config GENERIC_CPU
bool "Generic-x86-64"
@ -476,8 +473,9 @@ config HPET_TIMER
<http://www.intel.com/hardwaredesign/hpetspec.htm>.
config HPET_EMULATE_RTC
bool "Provide RTC interrupt"
bool
depends on HPET_TIMER && RTC=y
default y
# Mark as embedded because too many people got it wrong.
# The code disables itself when not needed.
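The MPSC/MCORE2 help texts direct users to the cpu family field when choosing between the two options. A hedged userspace sketch of that check; illustration only, not part of the tree:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		int family;
		FILE *f = fopen("/proc/cpuinfo", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			if (sscanf(line, "cpu family : %d", &family) == 1) {
				printf("cpu family %d "
				       "(15 = older Xeon/MPSC, 6 = newer/MCORE2)\n",
				       family);
				break;
			}
		}
		fclose(f);
		return 0;
	}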

View file

@ -110,9 +110,15 @@ bzdisk: vmlinux
fdimage fdimage144 fdimage288 isoimage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
install:
install: vdso_install
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
vdso_install:
ifeq ($(CONFIG_IA32_EMULATION),y)
$(Q)$(MAKE) $(build)=arch/x86/ia32 $@
endif
$(Q)$(MAKE) $(build)=arch/x86/vdso $@
archclean:
$(Q)rm -rf $(objtree)/arch/x86_64/boot
$(Q)$(MAKE) $(clean)=$(boot)

View file

@ -221,7 +221,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
SetPageReserved(virt_to_page((char *)page));
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
asm volatile("clflush %0" : : "m" (*(char *)(page+offset)));
clflush((char *)page+offset);
efficeon_private.l1_table[index] = page;
@ -268,15 +268,16 @@ static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int t
*page = insert;
/* clflush is slow, so don't clflush until we have to */
if ( last_page &&
((unsigned long)page^(unsigned long)last_page) & clflush_mask )
asm volatile("clflush %0" : : "m" (*last_page));
if (last_page &&
(((unsigned long)page^(unsigned long)last_page) &
clflush_mask))
clflush(last_page);
last_page = page;
}
if ( last_page )
asm volatile("clflush %0" : : "m" (*last_page));
clflush(last_page);
agp_bridge->driver->tlb_flush(mem);
return 0;

View file

@ -15,7 +15,6 @@ config VGASTATE
config VIDEO_OUTPUT_CONTROL
tristate "Lowlevel video output switch controls"
default m
help
This framework adds support for low-level control of the video
output switch.

View file

@ -1,6 +1,6 @@
#ifndef __UM_ALTERNATIVE_ASM_I
#define __UM_ALTERNATIVE_ASM_I
#include "asm/arch/alternative-asm.i"
#include "asm/arch/alternative-asm.h"
#endif

View file

@ -1,6 +1,6 @@
#ifndef __UM_FRAME_I
#define __UM_FRAME_I
#include "asm/arch/frame.i"
#include "asm/arch/frame.h"
#endif

View file

@ -1,40 +1,22 @@
include include/asm-generic/Kbuild.asm
header-y += boot.h
header-y += debugreg_32.h
header-y += debugreg_64.h
header-y += debugreg.h
header-y += ldt_32.h
header-y += ldt_64.h
header-y += ldt.h
header-y += msr-index.h
header-y += prctl.h
header-y += ptrace-abi_32.h
header-y += ptrace-abi_64.h
header-y += ptrace-abi.h
header-y += sigcontext32.h
header-y += ucontext_32.h
header-y += ucontext_64.h
header-y += ucontext.h
header-y += vsyscall32.h
unifdef-y += a.out_32.h
unifdef-y += a.out_64.h
unifdef-y += auxvec_32.h
unifdef-y += auxvec_64.h
unifdef-y += byteorder_32.h
unifdef-y += byteorder_64.h
unifdef-y += elf_32.h
unifdef-y += elf_64.h
unifdef-y += errno_32.h
unifdef-y += errno_64.h
unifdef-y += ioctls_32.h
unifdef-y += ioctls_64.h
unifdef-y += ipcbuf_32.h
unifdef-y += ipcbuf_64.h
unifdef-y += mce.h
unifdef-y += mman_32.h
unifdef-y += mman_64.h
unifdef-y += msgbuf_32.h
unifdef-y += msgbuf_64.h
unifdef-y += msr_32.h
@ -45,40 +27,22 @@ unifdef-y += mtrr_64.h
unifdef-y += mtrr.h
unifdef-y += page_32.h
unifdef-y += page_64.h
unifdef-y += param_32.h
unifdef-y += param_64.h
unifdef-y += posix_types_32.h
unifdef-y += posix_types_64.h
unifdef-y += ptrace_32.h
unifdef-y += ptrace_64.h
unifdef-y += resource_32.h
unifdef-y += resource_64.h
unifdef-y += sembuf_32.h
unifdef-y += sembuf_64.h
unifdef-y += setup_32.h
unifdef-y += setup_64.h
unifdef-y += shmbuf_32.h
unifdef-y += shmbuf_64.h
unifdef-y += shmparam_32.h
unifdef-y += shmparam_64.h
unifdef-y += sigcontext_32.h
unifdef-y += sigcontext_64.h
unifdef-y += siginfo_32.h
unifdef-y += siginfo_64.h
unifdef-y += signal_32.h
unifdef-y += signal_64.h
unifdef-y += sockios_32.h
unifdef-y += sockios_64.h
unifdef-y += stat_32.h
unifdef-y += stat_64.h
unifdef-y += statfs_32.h
unifdef-y += statfs_64.h
unifdef-y += termbits_32.h
unifdef-y += termbits_64.h
unifdef-y += termios_32.h
unifdef-y += termios_64.h
unifdef-y += types_32.h
unifdef-y += types_64.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
unifdef-y += user_32.h

View file

@ -1,5 +1,40 @@
#ifdef CONFIG_X86_32
# include "agp_32.h"
#else
# include "agp_64.h"
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent with the MMU. The
* GART gives the CPU a physical alias of pages in memory. The alias
* region is mapped uncacheable. Make sure there are no conflicting
* mappings with different cachability attributes for the same
* page. This avoids data corruption on some CPUs.
*/
/*
* Caller's responsibility to call global_flush_tlb() for performance
* reasons
*/
#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
#define flush_agp_mappings() global_flush_tlb()
/*
* Could use CLFLUSH here if the cpu supports it. But then it would
* need to be called for each cacheline of the whole page so it may
* not be worth it. Would need a page for it.
*/
#define flush_agp_cache() wbinvd()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif
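The comments in the unified header spell out the caller contract: switch each page's attributes, then do one global flush. A hedged sketch of that pattern; agp_map_batch is a hypothetical helper:

	static void agp_map_batch(struct page **pages, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			map_page_into_agp(pages[i]);
		/* One flush for the whole batch, as required above. */
		global_flush_tlb();
	}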

View file

@ -1,36 +0,0 @@
#ifndef AGP_H
#define AGP_H 1
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent with the MMU.
* The GART gives the CPU a physical alias of pages in memory. The alias region is
* mapped uncacheable. Make sure there are no conflicting mappings
* with different cachability attributes for the same page. This avoids
* data corruption on some CPUs.
*/
/* Caller's responsibility to call global_flush_tlb() for
* performance reasons */
#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
#define flush_agp_mappings() global_flush_tlb()
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
#define flush_agp_cache() wbinvd()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif

View file

@ -1,34 +0,0 @@
#ifndef AGP_H
#define AGP_H 1
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent.
* The GART gives the CPU a physical alias of memory. The alias is
* mapped uncacheable. Make sure there are no conflicting mappings
* with different cachability attributes for the same page.
*/
/* Caller's responsibility to call global_flush_tlb() for
* performance reasons */
#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
#define flush_agp_mappings() global_flush_tlb()
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
#endif

View file

@ -0,0 +1,22 @@
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_32
# define X86_ALIGN .long
#else
# define X86_ALIGN .quad
#endif
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
1: lock
.section .smp_locks,"a"
.align 4
X86_ALIGN 1b
.previous
.endm
#else
.macro LOCK_PREFIX
.endm
#endif
#endif /* __ASSEMBLY__ */
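The new alternative-asm.h gives .S files the same LOCK_PREFIX idiom the C headers already have: on SMP it emits a lock prefix and records its address in the .smp_locks section, so a kernel booted on a uniprocessor can patch the prefix out. A hedged sketch of the C-side counterpart in use; atomic_inc_sketch is illustrative, not the kernel's atomic_inc():

	static inline void atomic_inc_sketch(int *v)
	{
		asm volatile(LOCK_PREFIX "incl %0" : "+m" (*v));
	}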

View file

@ -1,5 +0,0 @@
#ifdef CONFIG_X86_32
# include "alternative-asm_32.i"
#else
# include "alternative-asm_64.i"
#endif

Some files were not shown because too many files have changed in this diff.