diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index e1924cc9687b..ff8bb3770c9d 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -113,7 +113,7 @@ CONFIG_IOSAPIC=y
 CONFIG_IA64_SGI_SN_XP=m
 CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
-CONFIG_NR_CPUS=512
+CONFIG_NR_CPUS=1024
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_SCHED_SMT=y
 CONFIG_PREEMPT=y
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 5b7e736f3b49..028a2b95936c 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -249,3 +249,32 @@ time_init (void)
 	 */
 	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
+
+#define SMALLUSECS 100
+
+void
+udelay (unsigned long usecs)
+{
+	unsigned long start;
+	unsigned long cycles;
+	unsigned long smallusecs;
+
+	/*
+	 * Execute the non-preemptible delay loop (because the ITC might
+	 * not be synchronized between CPUS) in relatively short time
+	 * chunks, allowing preemption between the chunks.
+	 */
+	while (usecs > 0) {
+		smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
+		preempt_disable();
+		cycles = smallusecs*local_cpu_data->cyc_per_usec;
+		start = ia64_get_itc();
+
+		while (ia64_get_itc() - start < cycles)
+			cpu_relax();
+
+		preempt_enable();
+		usecs -= smallusecs;
+	}
+}
+EXPORT_SYMBOL(udelay);
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index c6d40446c2c4..b631cf86ed44 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -53,7 +53,7 @@ static void uncached_ipi_visibility(void *data)
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
 		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, get_cpu());
+		       "CPU %i\n", status, raw_smp_processor_id());
 }
 
 
@@ -63,7 +63,7 @@ static void uncached_ipi_mc_drain(void *data)
 	status = ia64_pal_mc_drain();
 	if (status)
 		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, get_cpu());
+		       "CPU %i\n", status, raw_smp_processor_id());
 }
 
 
@@ -105,7 +105,7 @@ uncached_get_new_chunk(struct gen_pool *poolp)
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
-		status, get_cpu());
+		status, raw_smp_processor_id());
 
 	if (!status) {
 		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 30d8564e9603..73af6267d2ef 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -177,6 +177,9 @@ SECTIONS
 	}
   . = ALIGN(PAGE_SIZE);		/* make sure the gate page doesn't expose kernel data */
 
+  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
+        { *(.data.read_mostly) }
+
   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
         { *(.data.cacheline_aligned) }
 
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 5d54f5f4e926..471bbaa65d1b 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -202,7 +202,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 		     unsigned long end, unsigned long nbits)
 {
 	int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
-	int mymm = (mm == current->active_mm);
+	int mymm = (mm == current->active_mm && current->mm);
 	volatile unsigned long *ptc0, *ptc1;
 	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
 	short nasids[MAX_NUMNODES], nix;
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
index 57182d6f2b9a..bba702076391 100644
--- a/include/asm-ia64/delay.h
+++ b/include/asm-ia64/delay.h
@@ -84,14 +84,6 @@ __delay (unsigned long loops)
 	ia64_delay_loop (loops - 1);
 }
 
-static __inline__ void
-udelay (unsigned long usecs)
-{
-	unsigned long start = ia64_get_itc();
-	unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
-
-	while (ia64_get_itc() - start < cycles)
-		cpu_relax();
-}
+extern void udelay (unsigned long usecs);
 
 #endif /* _ASM_IA64_DELAY_H */
diff --git a/include/linux/cache.h b/include/linux/cache.h
index f6b5a46c5f82..0b7ecf3af78a 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,7 +13,7 @@
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 #endif
 
-#if defined(CONFIG_X86) || defined(CONFIG_SPARC64)
+#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64)
 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
 #else
 #define __read_mostly
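For context, a minimal sketch (not part of the patch) of how the new section is consumed: with CONFIG_IA64 now covered by the __read_mostly definition in <linux/cache.h>, any variable annotated with __read_mostly lands in .data.read_mostly, which the vmlinux.lds.S hunk above collects into its own output section so rarely-written data stays off frequently-written cachelines. The variable name below is purely illustrative.

/*
 * Hypothetical driver-style usage; "example_poll_interval" is an
 * illustrative name, not something added by this patch.  On ia64,
 * __read_mostly now expands to
 * __attribute__((__section__(".data.read_mostly"))), so this variable
 * is grouped with other read-mostly data by the new linker section.
 */
#include <linux/cache.h>

static unsigned int example_poll_interval __read_mostly = 100;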