Merge branch 'linux-2.6' into powerpc-next

commit fcff474ea5
Author: Paul Mackerras
Date:   2008-05-16 23:13:42 +10:00

503 changed files with 16310 additions and 4544 deletions

View file

@ -310,8 +310,8 @@ and then start a subshell 'sh' in that cgroup:
cd /dev/cgroup
mkdir Charlie
cd Charlie
/bin/echo 2-3 > cpus
/bin/echo 1 > mems
/bin/echo 2-3 > cpuset.cpus
/bin/echo 1 > cpuset.mems
/bin/echo $$ > tasks
sh
# The subshell 'sh' is now running in cgroup Charlie

View file

@ -289,6 +289,14 @@ Who: Glauber Costa <gcosta@redhat.com>
---------------------------
What: old style serial driver for ColdFire (CONFIG_SERIAL_COLDFIRE)
When: 2.6.28
Why: This driver still uses the old interface and has been replaced
by CONFIG_SERIAL_MCF.
Who: Sebastian Siewior <sebastian@breakpoint.cc>
---------------------------
What: /sys/o2cb symlink
When: January 2010
Why: /sys/fs/o2cb is the proper location for this information - /sys/o2cb

View file

@ -69,7 +69,8 @@ point2: Set the pwm speed at a higher temperature bound.
The ADT7473 will scale the pwm between the lower and higher pwm speed when
the temperature is between the two temperature boundaries. PWM values range
from 0 (off) to 255 (full speed).
from 0 (off) to 255 (full speed). Fan speed will be set to maximum when the
temperature sensor associated with the PWM control exceeds temp#_max.
Notes
-----

View file

@ -398,9 +398,6 @@ and is between 256 and 4096 characters. It is defined in the file
cio_ignore= [S390]
See Documentation/s390/CommonIO for details.
cio_msg= [S390]
See Documentation/s390/CommonIO for details.
clock= [BUGS=X86-32, HW] gettimeofday clocksource override.
[Deprecated]
Forces specified clocksource (if available) to be used
@ -689,6 +686,12 @@ and is between 256 and 4096 characters. It is defined in the file
floppy= [HW]
See Documentation/floppy.txt.
force_pal_cache_flush
[IA-64] Avoid check_sal_cache_flush which may hang on
buggy SAL_CACHE_FLUSH implementations. Using this
parameter will force ia64_sal_cache_flush to call
ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
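For illustration only (not part of this patch; the root= value is made up),
the option takes no argument and is enabled by appending the bare keyword to
the kernel command line:

	root=/dev/sda2 force_pal_cache_flush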
gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)

View file

@ -994,7 +994,17 @@ The Linux kernel has eight basic CPU memory barriers:
DATA DEPENDENCY read_barrier_depends() smp_read_barrier_depends()
All CPU memory barriers unconditionally imply compiler barriers.
All memory barriers except the data dependency barriers imply a compiler
barrier. Data dependencies do not impose any additional compiler ordering.
Aside: in the case of data dependencies, the compiler would be expected to
issue the loads in the correct order (eg. `a[b]` would have to load the value
of b before loading a[b]). However, the C specification does not guarantee
that the compiler will not speculate the value of b (eg. assume it is equal
to 1) and load a before b (eg. tmp = a[1]; if (b != 1) tmp = a[b]; ). There
is also the problem of the compiler reloading b after having loaded a[b],
ending up with a newer copy of b than of a[b]. A consensus has not yet been
reached about these problems; however, the ACCESS_ONCE macro is a good place
to start looking.
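As a minimal sketch (not part of this patch, and assuming only the stock
ACCESS_ONCE() definition from <linux/compiler.h>), forcing a single ordered
read of b avoids both the speculation and the reload hazards described above:

	#include <linux/compiler.h>	/* ACCESS_ONCE() */

	extern int a[16];
	extern int b;

	int read_dependent(void)
	{
		/* exactly one real load of b; the compiler may neither
		 * guess its value nor reload it later */
		int idx = ACCESS_ONCE(b);

		return a[idx];	/* a[b] uses the value that was actually loaded */
	}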
SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
systems because it is assumed that a CPU will appear to be self-consistent,

View file

@ -5,6 +5,6 @@
4 -> DViCO FusionHDTV5 Express [18ac:d500]
5 -> Hauppauge WinTV-HVR1500Q [0070:7790,0070:7797]
6 -> Hauppauge WinTV-HVR1500 [0070:7710,0070:7717]
7 -> Hauppauge WinTV-HVR1200 [0070:71d1]
7 -> Hauppauge WinTV-HVR1200 [0070:71d1,0070:71d3]
8 -> Hauppauge WinTV-HVR1700 [0070:8101]
9 -> Hauppauge WinTV-HVR1400 [0070:8010]

View file

@ -14,4 +14,4 @@
13 -> Terratec Prodigy XS (em2880) [0ccd:0047]
14 -> Pixelview Prolink PlayTV USB 2.0 (em2820/em2840)
15 -> V-Gear PocketTV (em2800)
16 -> Hauppauge WinTV HVR 950 (em2880) [2040:6513]
16 -> Hauppauge WinTV HVR 950 (em2880) [2040:6513,2040:6517,2040:651b,2040:651f]

View file

@ -367,12 +367,12 @@ S: Maintained for 2.4; PCI support for 2.6.
AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
P: Thomas Dahlmann
M: thomas.dahlmann@amd.com
L: info-linux@geode.amd.com (subscribers-only)
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
S: Supported
AMD GEODE PROCESSOR/CHIPSET SUPPORT
P: Jordan Crouse
L: info-linux@geode.amd.com (subscribers-only)
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
@ -1230,6 +1230,15 @@ P: Jaya Kumar
M: jayakumar.alsa@gmail.com
S: Maintained
CX18 VIDEO4LINUX DRIVER
P: Hans Verkuil, Andy Walls
M: hverkuil@xs4all.nl, awalls@radix.net
L: ivtv-devel@ivtvdriver.org
L: ivtv-users@ivtvdriver.org
L: video4linux-list@redhat.com
W: http://linuxtv.org
S: Maintained
CYBERPRO FB DRIVER
P: Russell King
M: rmk@arm.linux.org.uk

View file

@ -544,10 +544,10 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
struct resource *fb_res = &lcdc_resources[2];
size_t fb_len = fb_res->end - fb_res->start + 1;
fb = ioremap_writecombine(fb_res->start, fb_len);
fb = ioremap(fb_res->start, fb_len);
if (fb) {
memset(fb, 0, fb_len);
iounmap(fb, fb_len);
iounmap(fb);
}
}
lcdc_data = *data;

View file

@ -332,13 +332,6 @@ static struct resource lcdc_resources[] = {
.end = AT91SAM9RL_ID_LCDC,
.flags = IORESOURCE_IRQ,
},
#if defined(CONFIG_FB_INTSRAM)
[2] = {
.start = AT91SAM9RL_SRAM_BASE,
.end = AT91SAM9RL_SRAM_BASE + AT91SAM9RL_SRAM_SIZE - 1,
.flags = IORESOURCE_MEM,
},
#endif
};
static struct platform_device at91_lcdc_device = {
@ -381,20 +374,6 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
at91_set_B_periph(AT91_PIN_PC24, 0); /* LCDD22 */
at91_set_B_periph(AT91_PIN_PC25, 0); /* LCDD23 */
#ifdef CONFIG_FB_INTSRAM
{
void __iomem *fb;
struct resource *fb_res = &lcdc_resources[2];
size_t fb_len = fb_res->end - fb_res->start + 1;
fb = ioremap_writecombine(fb_res->start, fb_len);
if (fb) {
memset(fb, 0, fb_len);
iounmap(fb, fb_len);
}
}
#endif
lcdc_data = *data;
platform_device_register(&at91_lcdc_device);
}

View file

@ -135,6 +135,7 @@ config IA64_GENERIC
HP-zx1/sx1000 For HP systems
HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices.
SGI-SN2 For SGI Altix systems
SGI-UV For SGI UV systems
Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
If you don't know what to do, choose "generic".
@ -170,6 +171,18 @@ config IA64_SGI_SN2
to select this option. If in doubt, select ia64 generic support
instead.
config IA64_SGI_UV
bool "SGI-UV"
select NUMA
select ACPI_NUMA
select SWIOTLB
help
Selecting this option will optimize the kernel for use on UV based
systems, but the resulting kernel binary will not run on other
types of ia64 systems. If you have an SGI UV system, it's safe
to select this option. If in doubt, select ia64 generic support
instead.
config IA64_HP_SIM
bool "Ski-simulator"
select SWIOTLB

View file

@ -63,7 +63,7 @@ drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/ arch/ia64/uv/
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot

View file

@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <asm/intrinsics.h>
@ -29,7 +28,6 @@
extern int die_if_kernel (char *str, struct pt_regs *regs, long err);
struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
@ -240,14 +238,6 @@ ia32_cpu_init (void)
static int __init
ia32_init (void)
{
ia32_exec_domain.name = "Linux/x86";
ia32_exec_domain.handler = NULL;
ia32_exec_domain.pers_low = PER_LINUX32;
ia32_exec_domain.pers_high = PER_LINUX32;
ia32_exec_domain.signal_map = default_exec_domain.signal_map;
ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
register_exec_domain(&ia32_exec_domain);
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
extern struct kmem_cache *ia64_partial_page_cachep;

View file

@ -117,7 +117,10 @@ acpi_get_sysname(void)
if (!strcmp(hdr->oem_id, "HP")) {
return "hpzx1";
} else if (!strcmp(hdr->oem_id, "SGI")) {
return "sn2";
if (!strcmp(hdr->oem_table_id + 4, "UV"))
return "uv";
else
return "sn2";
}
return "dig";
@ -130,6 +133,8 @@ acpi_get_sysname(void)
return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
return "uv";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# else
@ -622,6 +627,9 @@ void acpi_unregister_gsi(u32 gsi)
if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
return;
if (has_8259 && gsi < 16)
return;
iosapic_unregister_intr(gsi);
}

View file

@ -1156,6 +1156,9 @@ skip_rbs_switch:
* r31 = current->thread_info->flags
* On exit:
* p6 = TRUE if work-pending-check needs to be redone
*
* Interrupts are disabled on entry, re-enabled depending on work, and
* disabled on exit.
*/
.work_pending_syscall:
add r2=-8,r2
@ -1164,16 +1167,16 @@ skip_rbs_switch:
st8 [r2]=r8
st8 [r3]=r10
.work_pending:
tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
(p6) br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
;;
(pKStk) st4 [r20]=r21
ssm psr.i // enable interrupts
#endif
ssm psr.i // enable interrupts
br.call.spnt.many rp=schedule
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1
.ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
rsm psr.i // disable interrupts
;;
#ifdef CONFIG_PREEMPT
@ -1182,13 +1185,13 @@ skip_rbs_switch:
(pKStk) st4 [r20]=r0 // preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
br.cond.sptk.many .work_processed_kernel // re-check
br.cond.sptk.many .work_processed_kernel
.notify:
(pUStk) br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0
.ret10: cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
br.cond.sptk.many .work_processed_kernel // don't re-check
br.cond.sptk.many .work_processed_kernel
.work_pending_syscall_end:
adds r2=PT(R8)+16,r12
@ -1196,7 +1199,7 @@ skip_rbs_switch:
;;
ld8 r8=[r2]
ld8 r10=[r3]
br.cond.sptk.many .work_processed_syscall // re-check
br.cond.sptk.many .work_processed_syscall
END(ia64_leave_kernel)
@ -1234,9 +1237,12 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
END(ia64_invoke_schedule_tail)
/*
* Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to
* be set up by the caller. We declare 8 input registers so the system call
* args get preserved, in case we need to restart a system call.
* Setup stack and call do_notify_resume_user(), keeping interrupts
* disabled.
*
* Note that pSys and pNonSys need to be set up by the caller.
* We declare 8 input registers so the system call args get preserved,
* in case we need to restart a system call.
*/
ENTRY(notify_resume_user)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)

View file

@ -900,12 +900,6 @@ static void
palinfo_smp_call(void *info)
{
palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
if (data == NULL) {
printk(KERN_ERR "palinfo: data pointer is NULL\n");
data->ret = 0; /* no output */
return;
}
/* this does the actual call */
data->ret = (*data->func)(data->page);
}

View file

@ -5013,12 +5013,13 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
}
static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
/*
* pfm_handle_work() can be called with interrupts enabled
* (TIF_NEED_RESCHED) or disabled. The down_interruptible
* call may sleep, therefore we must re-enable interrupts
* to avoid deadlocks. It is safe to do so because this function
* is called ONLY when returning to user level (PUStk=1), in which case
* is called ONLY when returning to user level (pUStk=1), in which case
* there is no risk of kernel stack overflow due to deep
* interrupt nesting.
*/
@ -5034,7 +5035,8 @@ pfm_handle_work(void)
ctx = PFM_GET_CTX(current);
if (ctx == NULL) {
printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
task_pid_nr(current));
return;
}
@ -5058,11 +5060,12 @@ pfm_handle_work(void)
/*
* must be done before we check for simple-reset mode
*/
if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
goto do_zombie;
//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
if (reason == PFM_TRAP_REASON_RESET)
goto skip_blocking;
/*
* restore interrupt mask to what it was on entry.
@ -5110,7 +5113,8 @@ do_zombie:
/*
* in case of interruption of down() we don't restart anything
*/
if (ret < 0) goto nothing_to_do;
if (ret < 0)
goto nothing_to_do;
skip_blocking:
pfm_resume_after_ovfl(ctx, ovfl_regs, regs);

View file

@ -167,11 +167,18 @@ void tsk_clear_notify_resume(struct task_struct *tsk)
clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME);
}
/*
* do_notify_resume_user():
* Called from notify_resume_user at entry.S, with interrupts disabled.
*/
void
do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
if (fsys_mode(current, &scr->pt)) {
/* defer signal-handling etc. until we return to privilege-level 0. */
/*
* defer signal-handling etc. until we return to
* privilege-level 0.
*/
if (!ia64_psr(&scr->pt)->lp)
ia64_psr(&scr->pt)->lp = 1;
return;
@ -179,16 +186,26 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall
#ifdef CONFIG_PERFMON
if (current->thread.pfm_needs_checking)
/*
* Note: pfm_handle_work() allows us to call it with interrupts
* disabled, and may enable interrupts within the function.
*/
pfm_handle_work();
#endif
/* deal with pending signal delivery */
if (test_thread_flag(TIF_SIGPENDING))
if (test_thread_flag(TIF_SIGPENDING)) {
local_irq_enable(); /* force interrupt enable */
ia64_do_signal(scr, in_syscall);
}
/* copy user rbs to kernel rbs */
if (unlikely(test_thread_flag(TIF_RESTORE_RSE)))
if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
local_irq_enable(); /* force interrupt enable */
ia64_sync_krbs();
}
local_irq_disable(); /* force interrupt disable */
}
static int pal_halt = 1;

View file

@ -229,6 +229,14 @@ static void __init sal_desc_ap_wakeup(void *p) { }
*/
static int sal_cache_flush_drops_interrupts;
static int __init
force_pal_cache_flush(char *str)
{
sal_cache_flush_drops_interrupts = 1;
return 0;
}
early_param("force_pal_cache_flush", force_pal_cache_flush);
void __init
check_sal_cache_flush (void)
{
@ -237,6 +245,9 @@ check_sal_cache_flush (void)
u64 vector, cache_type = 3;
struct ia64_sal_retval isrv;
if (sal_cache_flush_drops_interrupts)
return;
cpu = get_cpu();
local_irq_save(flags);

View file

@ -239,6 +239,25 @@ __initcall(register_memory);
#ifdef CONFIG_KEXEC
/*
* This function checks if the reserved crashkernel is allowed on the specific
* IA64 machine flavour. Machines without an IO TLB use swiotlb and require
* some memory below 4 GB (i.e. in 32 bit area), see the implementation of
lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use it
in the kdump case. See the comment in sba_init() in sba_iommu.c.
*
* So, the only machvec that really supports loading the kdump kernel
* over 4 GB is "sn2".
*/
static int __init check_crashkernel_memory(unsigned long pbase, size_t size)
{
if (ia64_platform_is("sn2") || ia64_platform_is("uv"))
return 1;
else
return pbase < (1UL << 32);
}
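For illustration (not taken from this patch; the size is arbitrary), a boot
option such as

	crashkernel=256M

is still honoured anywhere in memory on "sn2" and "uv", while on every other
ia64 machvec the reserved region must now fall below 4 GB, otherwise
setup_crashkernel() prints the warning added below and skips the reservation.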
static void __init setup_crashkernel(unsigned long total, int *n)
{
unsigned long long base = 0, size = 0;
@ -252,6 +271,16 @@ static void __init setup_crashkernel(unsigned long total, int *n)
base = kdump_find_rsvd_region(size,
rsvd_region, *n);
}
if (!check_crashkernel_memory(base, size)) {
pr_warning("crashkernel: There would be kdump memory "
"at %ld GB but this is unusable because it "
"must\nbe below 4 GB. Change the memory "
"configuration of the machine.\n",
(unsigned long)(base >> 30));
return;
}
if (base != ~0UL) {
printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
"for crashkernel (System RAM: %ldMB)\n",

View file

@ -719,3 +719,28 @@ out:
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif
/*
* Even when CONFIG_IA32_SUPPORT is not enabled it is
* useful to have the Linux/x86 domain registered to
* avoid an attempted module load when emulators call
* personality(PER_LINUX32). This saves several milliseconds
* on each such call.
*/
static struct exec_domain ia32_exec_domain;
static int __init
per_linux32_init(void)
{
ia32_exec_domain.name = "Linux/x86";
ia32_exec_domain.handler = NULL;
ia32_exec_domain.pers_low = PER_LINUX32;
ia32_exec_domain.pers_high = PER_LINUX32;
ia32_exec_domain.signal_map = default_exec_domain.signal_map;
ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
register_exec_domain(&ia32_exec_domain);
return 0;
}
__initcall(per_linux32_init);
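A hypothetical user-space trigger for this path (illustrative only, not part
of the patch): an ia32 emulator switching personality before running x86
code, which on kernels without CONFIG_IA32_SUPPORT previously caused an
attempted module load on every such call.

	#include <sys/personality.h>
	#include <stdio.h>

	int main(void)
	{
		/* PER_LINUX32 now resolves to the always-registered
		 * "Linux/x86" exec domain instead of triggering a
		 * module-load attempt */
		if (personality(PER_LINUX32) == -1)
			perror("personality");
		else
			printf("personality set to PER_LINUX32\n");
		return 0;
	}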

arch/ia64/uv/Makefile (new file, 12 lines added)
View file

@ -0,0 +1,12 @@
# arch/ia64/uv/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2008 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn uv subplatform
#
obj-y += kernel/

View file

@ -0,0 +1,13 @@
# arch/ia64/uv/kernel/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2008 Silicon Graphics, Inc. All Rights Reserved.
#
EXTRA_CFLAGS += -Iarch/ia64/sn/include
obj-y += setup.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o

View file

@ -0,0 +1,11 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#define MACHVEC_PLATFORM_NAME uv
#define MACHVEC_PLATFORM_HEADER <asm/machvec_uv.h>
#include <asm/machvec_init.h>

View file

@ -0,0 +1,98 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SGI UV Core Functions
*
* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/percpu.h>
#include <asm/sn/simulator.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
struct redir_addr {
unsigned long redirect;
unsigned long alias;
};
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
static __initdata struct redir_addr redir_addrs[] = {
{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
union uvh_si_alias0_overlay_config_u alias;
union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
int i;
for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
alias.v = uv_read_local_mmr(redir_addrs[i].alias);
if (alias.s.base == 0) {
*size = (1UL << alias.s.m_alias);
redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
return;
}
}
BUG();
}
void __init uv_setup(char **cmdline_p)
{
union uvh_si_addr_map_config_u m_n_config;
union uvh_node_id_u node_id;
unsigned long gnode_upper;
int nid, cpu, m_val, n_val;
unsigned long mmr_base, lowmem_redir_base, lowmem_redir_size;
if (IS_MEDUSA()) {
lowmem_redir_base = 0;
lowmem_redir_size = 0;
node_id.v = 0;
m_n_config.s.m_skt = 37;
m_n_config.s.n_skt = 0;
mmr_base = 0;
} else {
get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
node_id.v = uv_read_local_mmr(UVH_NODE_ID);
m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
mmr_base =
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
~UV_MMR_ENABLE;
}
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
gnode_upper = (((unsigned long)node_id.s.node_id) &
~((1 << n_val) - 1)) << m_val;
for_each_present_cpu(cpu) {
nid = cpu_to_node(cpu);
uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
uv_cpu_hub_info(cpu)->lowmem_remap_top =
lowmem_redir_base + lowmem_redir_size;
uv_cpu_hub_info(cpu)->m_val = m_val;
uv_cpu_hub_info(cpu)->n_val = m_val;
uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1;
uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid);
}
}

View file

@ -671,6 +671,9 @@ config ROMKERNEL
endchoice
if COLDFIRE
source "kernel/Kconfig.preempt"
endif
source "mm/Kconfig"
endmenu

View file

@ -114,6 +114,16 @@ SECTIONS {
*(__kcrctab_gpl)
__stop___kcrctab_gpl = .;
/* Kernel symbol table: Normal unused symbols */
__start___kcrctab_unused = .;
*(__kcrctab_unused)
__stop___kcrctab_unused = .;
/* Kernel symbol table: GPL-only unused symbols */
__start___kcrctab_unused_gpl = .;
*(__kcrctab_unused_gpl)
__stop___kcrctab_unused_gpl = .;
/* Kernel symbol table: GPL-future symbols */
__start___kcrctab_gpl_future = .;
*(__kcrctab_gpl_future)

View file

@ -26,42 +26,42 @@ rm -fr $4/../usr/include/linux $4/../usr/include/asm
install -c -m 0755 $2 $4/vmlinuz
install -c -m 0755 $5 $4/boot.rom
install -c -m 0755 -d $4/../usr/include/linux
cd $TOPDIR/include/linux
cd ${srctree}/include/linux
for i in `find . -maxdepth 1 -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux
done
install -c -m 0755 -d $4/../usr/include/linux/byteorder
cd $TOPDIR/include/linux/byteorder
cd ${srctree}/include/linux/byteorder
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux/byteorder
done
install -c -m 0755 -d $4/../usr/include/linux/lockd
cd $TOPDIR/include/linux/lockd
cd ${srctree}/include/linux/lockd
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux/lockd
done
install -c -m 0755 -d $4/../usr/include/linux/netfilter_ipv4
cd $TOPDIR/include/linux/netfilter_ipv4
cd ${srctree}/include/linux/netfilter_ipv4
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux/netfilter_ipv4
done
install -c -m 0755 -d $4/../usr/include/linux/nfsd
cd $TOPDIR/include/linux/nfsd
cd ${srctree}/include/linux/nfsd
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux/nfsd/$i
done
install -c -m 0755 -d $4/../usr/include/linux/raid
cd $TOPDIR/include/linux/raid
cd ${srctree}/include/linux/raid
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux/raid
done
install -c -m 0755 -d $4/../usr/include/linux/sunrpc
cd $TOPDIR/include/linux/sunrpc
cd ${srctree}/include/linux/sunrpc
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/linux/sunrpc
done
install -c -m 0755 -d $4/../usr/include/asm
cd $TOPDIR/include/asm
cd ${srctree}/include/asm
for i in `find . -name '*.h' -print`; do
install -c -m 0644 $i $4/../usr/include/asm
done

View file

@ -13,9 +13,10 @@
#include <asm/unistd.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
.level LEVEL
.text
__HEAD
.import hpux_call_table
.import hpux_syscall_exit,code

View file

@ -28,9 +28,10 @@
#include <asm/assembly.h>
#include <asm/signal.h>
#include <linux/linkage.h>
#include <linux/init.h>
.level LEVEL
.text
__HEAD
/* These should probably go in a header file somewhere.
* They are duplicated in kernel/wrappers.S

View file

@ -4,9 +4,6 @@
extra-y := init_task.o head.o vmlinux.lds
AFLAGS_entry.o := -traditional
AFLAGS_pacache.o := -traditional
obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o \

View file

@ -38,18 +38,11 @@
#include <asm/thread_info.h>
#include <linux/linkage.h>
#include <linux/init.h>
#ifdef CONFIG_64BIT
#define CMPIB cmpib,*
#define CMPB cmpb,*
#define COND(x) *x
.level 2.0w
#else
#define CMPIB cmpib,
#define CMPB cmpb,
#define COND(x) x
.level 2.0
#endif
@ -629,7 +622,7 @@
* the static part of the kernel address space.
*/
.text
__HEAD
.align PAGE_SIZE
@ -957,9 +950,9 @@ intr_check_sig:
* Only do signals if we are returning to user space
*/
LDREG PT_IASQ0(%r16), %r20
CMPIB=,n 0,%r20,intr_restore /* backward */
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
LDREG PT_IASQ1(%r16), %r20
CMPIB=,n 0,%r20,intr_restore /* backward */
cmpib,COND(=),n 0,%r20,intr_restore /* backward */
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
@ -1013,10 +1006,10 @@ intr_do_resched:
* we jump back to intr_restore.
*/
LDREG PT_IASQ0(%r16), %r20
CMPIB= 0, %r20, intr_do_preempt
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
LDREG PT_IASQ1(%r16), %r20
CMPIB= 0, %r20, intr_do_preempt
cmpib,COND(=) 0, %r20, intr_do_preempt
nop
#ifdef CONFIG_64BIT
@ -1045,7 +1038,7 @@ intr_do_preempt:
/* current_thread_info()->preempt_count */
mfctl %cr30, %r1
LDREG TI_PRE_COUNT(%r1), %r19
CMPIB<> 0, %r19, intr_restore /* if preempt_count > 0 */
cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
nop /* prev insn branched backwards */
/* check if we interrupted a critical path */
@ -1064,7 +1057,7 @@ intr_do_preempt:
*/
intr_extint:
CMPIB=,n 0,%r16,1f
cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b,n 2f
@ -1099,7 +1092,7 @@ ENDPROC(syscall_exit_rfi)
ENTRY(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
CMPIB=,n 0,%r16,1f
cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b 2f
copy %r8,%r26
@ -1121,7 +1114,7 @@ ENTRY(intr_save) /* for os_hpmc */
* adjust isr/ior below.
*/
CMPIB=,n 6,%r26,skip_save_ior
cmpib,COND(=),n 6,%r26,skip_save_ior
mfctl %cr20, %r16 /* isr */
@ -1450,11 +1443,11 @@ nadtlb_emulate:
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
BL get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
BL get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
BL set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
@ -1486,7 +1479,7 @@ nadtlb_probe_check:
cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
BL get_register,%r25 /* Find the target register */
extrw,u %r9,31,5,%r8 /* Get target register */
CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
BL set_register,%r25
copy %r0,%r1 /* Write zero to target register */
b nadtlb_nullify /* Nullify return insn */
@ -1570,12 +1563,12 @@ dbit_trap_20w:
L3_ptep ptp,pte,t0,va,dbit_fault
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nolock_20w
cmpib,COND(=),n 0,spc,dbit_nolock_20w
load32 PA(pa_dbit_lock),t0
dbit_spin_20w:
LDCW 0(t0),t1
cmpib,= 0,t1,dbit_spin_20w
cmpib,COND(=) 0,t1,dbit_spin_20w
nop
dbit_nolock_20w:
@ -1586,7 +1579,7 @@ dbit_nolock_20w:
idtlbt pte,prot
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nounlock_20w
cmpib,COND(=),n 0,spc,dbit_nounlock_20w
ldi 1,t1
stw t1,0(t0)
@ -1606,7 +1599,7 @@ dbit_trap_11:
L2_ptep ptp,pte,t0,va,dbit_fault
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nolock_11
cmpib,COND(=),n 0,spc,dbit_nolock_11
load32 PA(pa_dbit_lock),t0
dbit_spin_11:
@ -1628,7 +1621,7 @@ dbit_nolock_11:
mtsp t1, %sr1 /* Restore sr1 */
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nounlock_11
cmpib,COND(=),n 0,spc,dbit_nounlock_11
ldi 1,t1
stw t1,0(t0)
@ -1646,7 +1639,7 @@ dbit_trap_20:
L2_ptep ptp,pte,t0,va,dbit_fault
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nolock_20
cmpib,COND(=),n 0,spc,dbit_nolock_20
load32 PA(pa_dbit_lock),t0
dbit_spin_20:
@ -1665,7 +1658,7 @@ dbit_nolock_20:
idtlbt pte,prot
#ifdef CONFIG_SMP
CMPIB=,n 0,spc,dbit_nounlock_20
cmpib,COND(=),n 0,spc,dbit_nounlock_20
ldi 1,t1
stw t1,0(t0)
@ -1994,7 +1987,7 @@ ENTRY(syscall_exit)
/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
ldo -PER_HPUX(%r19), %r19
CMPIB<>,n 0,%r19,1f
cmpib,COND(<>),n 0,%r19,1f
/* Save other hpux returns if personality is PER_HPUX */
STREG %r22,TASK_PT_GR22(%r1)

View file

@ -32,7 +32,7 @@ ENTRY(boot_args)
.word 0 /* arg3 */
END(boot_args)
.section .text.head
__HEAD
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */

View file

@ -47,6 +47,7 @@
#include <asm/pdc.h>
#include <linux/linkage.h>
#include <linux/init.h>
/*
* stack for os_hpmc, the HPMC handler.
@ -76,7 +77,7 @@ ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
END(hpmc_pim_data)
.text
__HEAD
.import intr_save, code
ENTRY(os_hpmc)

View file

@ -499,7 +499,7 @@ add_system_map_addresses(struct parisc_device *dev, int num_addrs,
dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
__FILE__, __FUNCTION__);
__FILE__, __func__);
return;
}

View file

@ -37,8 +37,9 @@
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>
#include <linux/init.h>
.text
__HEAD
.align 128
ENTRY(flush_tlb_all_local)
@ -85,7 +86,7 @@ ENTRY(flush_tlb_all_local)
LDREG ITLB_OFF_COUNT(%r1), %arg2
LDREG ITLB_LOOP(%r1), %arg3
ADDIB= -1, %arg3, fitoneloop /* Preadjust and test */
addib,COND(=) -1, %arg3, fitoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
copy %arg0, %r28 /* Init base addr */
@ -95,14 +96,14 @@ fitmanyloop: /* Loop if LOOP >= 2 */
copy %arg2, %r29 /* Init middle loop count */
fitmanymiddle: /* Loop if LOOP >= 2 */
ADDIB> -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
pitlbe 0(%sr1, %r28)
pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
ADDIB> -1, %r29, fitmanymiddle /* Middle loop decr */
addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
ADDIB<=,n -1, %r22, fitdone /* Outer loop count decr */
addib,COND(<=),n -1, %r22, fitdone /* Outer loop count decr */
fitoneloop: /* Loop if LOOP = 1 */
mtsp %r20, %sr1
@ -110,10 +111,10 @@ fitoneloop: /* Loop if LOOP = 1 */
copy %arg2, %r29 /* init middle loop count */
fitonemiddle: /* Loop if LOOP = 1 */
ADDIB> -1, %r29, fitonemiddle /* Middle loop count decr */
addib,COND(>) -1, %r29, fitonemiddle /* Middle loop count decr */
pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
ADDIB> -1, %r22, fitoneloop /* Outer loop count decr */
addib,COND(>) -1, %r22, fitoneloop /* Outer loop count decr */
add %r21, %r20, %r20 /* increment space */
fitdone:
@ -128,7 +129,7 @@ fitdone:
LDREG DTLB_OFF_COUNT(%r1), %arg2
LDREG DTLB_LOOP(%r1), %arg3
ADDIB= -1, %arg3, fdtoneloop /* Preadjust and test */
addib,COND(=) -1, %arg3, fdtoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
copy %arg0, %r28 /* Init base addr */
@ -138,14 +139,14 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
copy %arg2, %r29 /* Init middle loop count */
fdtmanymiddle: /* Loop if LOOP >= 2 */
ADDIB> -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
pdtlbe 0(%sr1, %r28)
pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
ADDIB> -1, %r29, fdtmanymiddle /* Middle loop decr */
addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
copy %arg3, %r31 /* Re-init inner loop count */
movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
ADDIB<=,n -1, %r22,fdtdone /* Outer loop count decr */
addib,COND(<=),n -1, %r22,fdtdone /* Outer loop count decr */
fdtoneloop: /* Loop if LOOP = 1 */
mtsp %r20, %sr1
@ -153,10 +154,10 @@ fdtoneloop: /* Loop if LOOP = 1 */
copy %arg2, %r29 /* init middle loop count */
fdtonemiddle: /* Loop if LOOP = 1 */
ADDIB> -1, %r29, fdtonemiddle /* Middle loop count decr */
addib,COND(>) -1, %r29, fdtonemiddle /* Middle loop count decr */
pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
ADDIB> -1, %r22, fdtoneloop /* Outer loop count decr */
addib,COND(>) -1, %r22, fdtoneloop /* Outer loop count decr */
add %r21, %r20, %r20 /* increment space */
@ -209,18 +210,18 @@ ENTRY(flush_instruction_cache_local)
LDREG ICACHE_COUNT(%r1), %arg2
LDREG ICACHE_LOOP(%r1), %arg3
rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
ADDIB= -1, %arg3, fioneloop /* Preadjust and test */
addib,COND(=) -1, %arg3, fioneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
fimanyloop: /* Loop if LOOP >= 2 */
ADDIB> -1, %r31, fimanyloop /* Adjusted inner loop decr */
addib,COND(>) -1, %r31, fimanyloop /* Adjusted inner loop decr */
fice %r0(%sr1, %arg0)
fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
ADDIB<=,n -1, %arg2, fisync /* Outer loop decr */
addib,COND(<=),n -1, %arg2, fisync /* Outer loop decr */
fioneloop: /* Loop if LOOP = 1 */
ADDIB> -1, %arg2, fioneloop /* Outer loop count decr */
addib,COND(>) -1, %arg2, fioneloop /* Outer loop count decr */
fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
fisync:
@ -250,18 +251,18 @@ ENTRY(flush_data_cache_local)
LDREG DCACHE_COUNT(%r1), %arg2
LDREG DCACHE_LOOP(%r1), %arg3
rsm PSW_SM_I, %r22
ADDIB= -1, %arg3, fdoneloop /* Preadjust and test */
addib,COND(=) -1, %arg3, fdoneloop /* Preadjust and test */
movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
fdmanyloop: /* Loop if LOOP >= 2 */
ADDIB> -1, %r31, fdmanyloop /* Adjusted inner loop decr */
addib,COND(>) -1, %r31, fdmanyloop /* Adjusted inner loop decr */
fdce %r0(%sr1, %arg0)
fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
ADDIB<=,n -1, %arg2, fdsync /* Outer loop decr */
addib,COND(<=),n -1, %arg2, fdsync /* Outer loop decr */
fdoneloop: /* Loop if LOOP = 1 */
ADDIB> -1, %arg2, fdoneloop /* Outer loop count decr */
addib,COND(>) -1, %arg2, fdoneloop /* Outer loop count decr */
fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
fdsync:
@ -342,7 +343,7 @@ ENTRY(copy_user_page_asm)
* non-taken backward branch. Note that .+4 is a backwards branch.
* The ldd should only get executed if the branch is taken.
*/
ADDIB>,n -1, %r1, 1b /* bundle 10 */
addib,COND(>),n -1, %r1, 1b /* bundle 10 */
ldd 0(%r25), %r19 /* start next loads */
#else
@ -391,7 +392,7 @@ ENTRY(copy_user_page_asm)
stw %r21, 56(%r26)
stw %r22, 60(%r26)
ldo 64(%r26), %r26
ADDIB>,n -1, %r1, 1b
addib,COND(>),n -1, %r1, 1b
ldw 0(%r25), %r19
#endif
bv %r0(%r2)
@ -515,7 +516,7 @@ ENTRY(copy_user_page_asm)
stw %r21, 56(%r28)
stw %r22, 60(%r28)
ldo 64(%r28), %r28
ADDIB> -1, %r1,1b
addib,COND(>) -1, %r1,1b
ldo 64(%r29), %r29
bv %r0(%r2)
@ -574,7 +575,7 @@ ENTRY(__clear_user_page_asm)
std %r0, 104(%r28)
std %r0, 112(%r28)
std %r0, 120(%r28)
ADDIB> -1, %r1, 1b
addib,COND(>) -1, %r1, 1b
ldo 128(%r28), %r28
#else /* ! CONFIG_64BIT */
@ -597,7 +598,7 @@ ENTRY(__clear_user_page_asm)
stw %r0, 52(%r28)
stw %r0, 56(%r28)
stw %r0, 60(%r28)
ADDIB> -1, %r1, 1b
addib,COND(>) -1, %r1, 1b
ldo 64(%r28), %r28
#endif /* CONFIG_64BIT */
@ -640,7 +641,7 @@ ENTRY(flush_kernel_dcache_page_asm)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
CMPB<< %r26, %r25,1b
cmpb,COND(<<) %r26, %r25,1b
fdc,m %r23(%r26)
sync
@ -683,7 +684,7 @@ ENTRY(flush_user_dcache_page)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
CMPB<< %r26, %r25,1b
cmpb,COND(<<) %r26, %r25,1b
fdc,m %r23(%sr3, %r26)
sync
@ -726,7 +727,7 @@ ENTRY(flush_user_icache_page)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
CMPB<< %r26, %r25,1b
cmpb,COND(<<) %r26, %r25,1b
fic,m %r23(%sr3, %r26)
sync
@ -769,7 +770,7 @@ ENTRY(purge_kernel_dcache_page)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
pdc,m %r23(%r26)
CMPB<< %r26, %r25, 1b
cmpb,COND(<<) %r26, %r25, 1b
pdc,m %r23(%r26)
sync
@ -833,7 +834,7 @@ ENTRY(flush_alias_page)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
CMPB<< %r28, %r29, 1b
cmpb,COND(<<) %r28, %r29, 1b
fdc,m %r23(%r28)
sync
@ -856,7 +857,7 @@ flush_user_dcache_range_asm:
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: CMPB<<,n %r26, %r25, 1b
1: cmpb,COND(<<),n %r26, %r25, 1b
fdc,m %r23(%sr3, %r26)
sync
@ -877,7 +878,7 @@ ENTRY(flush_kernel_dcache_range_asm)
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: CMPB<<,n %r26, %r25,1b
1: cmpb,COND(<<),n %r26, %r25,1b
fdc,m %r23(%r26)
sync
@ -899,7 +900,7 @@ ENTRY(flush_user_icache_range_asm)
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: CMPB<<,n %r26, %r25,1b
1: cmpb,COND(<<),n %r26, %r25,1b
fic,m %r23(%sr3, %r26)
sync
@ -942,7 +943,7 @@ ENTRY(flush_kernel_icache_page)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
fic,m %r23(%sr4, %r26)
CMPB<< %r26, %r25, 1b
cmpb,COND(<<) %r26, %r25, 1b
fic,m %r23(%sr4, %r26)
sync
@ -963,7 +964,7 @@ ENTRY(flush_kernel_icache_range_asm)
ldo -1(%r23), %r21
ANDCM %r26, %r21, %r26
1: CMPB<<,n %r26, %r25, 1b
1: cmpb,COND(<<),n %r26, %r25, 1b
fic,m %r23(%sr4, %r26)
sync

View file

@ -20,6 +20,8 @@
*/
#include <asm/assembly.h>
#include <linux/init.h>
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
@ -41,7 +43,7 @@
; The coprocessor only needs to be enabled when
; starting/stopping the coprocessor with the pmenb/pmdis.
;
.text
__HEAD
ENTRY(perf_intrigue_enable_perf_counters)
.proc

View file

@ -12,6 +12,7 @@
#include <asm/assembly.h>
#include <linux/linkage.h>
#include <linux/init.h>
.section .bss
.export real_stack
@ -39,7 +40,7 @@ save_cr_end:
/************************ 32-bit real-mode calls ***********************/
/* This can be called in both narrow and wide kernels */
.text
__HEAD
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
@ -113,7 +114,7 @@ ENDPROC(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
.text
__HEAD
save_control_regs:
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
@ -145,7 +146,7 @@ restore_control_regs:
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
.text
__HEAD
.align 128
rfi_virt2real:
/* switch to real mode... */
@ -180,7 +181,7 @@ rfi_v2r_1:
bv 0(%r2)
nop
.text
__HEAD
.align 128
rfi_real2virt:
rsm PSW_SM_I,%r0
@ -218,7 +219,7 @@ rfi_r2v_1:
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
.text
__HEAD
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
@ -276,7 +277,7 @@ ENDPROC(real64_call_asm)
#endif
.text
__HEAD
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.

View file

@ -17,6 +17,7 @@
#include <asm/processor.h>
#include <linux/linkage.h>
#include <linux/init.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
@ -26,7 +27,7 @@
.level LEVEL
.text
__HEAD
.import syscall_exit,code
.import syscall_exit_rfi,code
@ -636,7 +637,7 @@ END(sys_call_table64)
All light-weight-syscall atomic operations
will use this set of locks
*/
.section .data
.section .data, "aw"
.align PAGE_SIZE
ENTRY(lws_lock_start)
/* lws locks */

View file

@ -275,7 +275,7 @@ KERN_CRIT " || ||\n");
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
printk(KERN_CRIT "%s() recursion detected.\n", __func__);
local_irq_enable();
while (1);
}

View file

@ -30,7 +30,7 @@
/* #define DEBUG_UNALIGNED 1 */
#ifdef DEBUG_UNALIGNED
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
@ -460,7 +460,8 @@ void handle_unaligned(struct pt_regs *regs)
goto force_sigbus;
}
if (unaligned_count > 5 && jiffies - last_time > 5*HZ) {
if (unaligned_count > 5 &&
time_after(jiffies, last_time + 5 * HZ)) {
unaligned_count = 0;
last_time = jiffies;
}

View file

@ -23,6 +23,7 @@
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
@ -55,7 +56,7 @@
.level LEVEL
.text
__HEAD
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */

View file

@ -33,11 +33,12 @@
*/
.text
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
__HEAD
/*
* get_sr gets the appropriate space value into

View file

@ -91,7 +91,7 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
#define THRESHOLD 16
#ifdef DEBUG_MEMCPY
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif

View file

@ -555,8 +555,6 @@ void show_mem(void)
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n",
nr_swap_pages<<(PAGE_SHIFT-10));
#ifndef CONFIG_DISCONTIGMEM
i = max_mapnr;
while (i-- > 0) {
@ -606,7 +604,7 @@ void show_mem(void)
int i, j;
for (i = 0; i < npmem_ranges; i++) {
zl = node_zonelist(i);
zl = node_zonelist(i, 0);
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zoneref *z;
struct zone *zone;

View file

@ -94,6 +94,9 @@ unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
@ -387,11 +390,32 @@ static void __init htab_init_page_sizes(void)
}
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* We try to use 16M pages for vmemmap if that is supported
* and we have at least 1G of RAM at boot
*/
if (mmu_psize_defs[MMU_PAGE_16M].shift &&
lmb_phys_mem_size() >= 0x40000000)
mmu_vmemmap_psize = MMU_PAGE_16M;
else if (mmu_psize_defs[MMU_PAGE_64K].shift)
mmu_vmemmap_psize = MMU_PAGE_64K;
else
mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
printk(KERN_DEBUG "Page orders: linear mapping = %d, "
"virtual = %d, io = %d\n",
"virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
", vmemmap = %d"
#endif
"\n",
mmu_psize_defs[mmu_linear_psize].shift,
mmu_psize_defs[mmu_virtual_psize].shift,
mmu_psize_defs[mmu_io_psize].shift);
mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
,mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
);
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending

View file

@ -19,6 +19,8 @@
*
*/
#undef DEBUG
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@ -208,12 +210,12 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
}
int __meminit vmemmap_populate(struct page *start_page,
unsigned long nr_pages, int node)
unsigned long nr_pages, int node)
{
unsigned long mode_rw;
unsigned long start = (unsigned long)start_page;
unsigned long end = (unsigned long)(start_page + nr_pages);
unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@ -235,11 +237,11 @@ int __meminit vmemmap_populate(struct page *start_page,
start, p, __pa(p));
mapped = htab_bolt_mapping(start, start + page_size,
__pa(p), mode_rw, mmu_linear_psize,
__pa(p), mode_rw, mmu_vmemmap_psize,
mmu_kernel_ssize);
BUG_ON(mapped < 0);
}
return 0;
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

View file

@ -28,7 +28,7 @@
#include <asm/udbg.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#define DBG(fmt...) printk(fmt)
#else
#define DBG pr_debug
#endif
@ -263,13 +263,19 @@ void slb_initialize(void)
extern unsigned int *slb_miss_kernel_load_linear;
extern unsigned int *slb_miss_kernel_load_io;
extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
extern unsigned int *slb_miss_kernel_load_vmemmap;
unsigned long vmemmap_llp;
#endif
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
io_llp = mmu_psize_defs[mmu_io_psize].sllp;
vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
@ -281,6 +287,12 @@ void slb_initialize(void)
DBG("SLB: linear LLP = %04lx\n", linear_llp);
DBG("SLB: io LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
patch_slb_encoding(slb_miss_kernel_load_vmemmap,
SLB_VSID_KERNEL | vmemmap_llp);
DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
}
get_paca()->stab_rr = SLB_NUM_BOLTED;

View file

@ -47,8 +47,7 @@ _GLOBAL(slb_allocate_realmode)
* it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */
/* Check if hitting the linear mapping of the vmalloc/ioremap
* kernel space
/* Check if hitting the linear mapping or some other kernel space
*/
bne cr7,1f
@ -62,7 +61,18 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
b slb_finish_load_1T
1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Check virtual memmap region. To be patched at kernel boot */
cmpldi cr0,r9,0xf
bne 1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
li r11,0
b 6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
BEGIN_FTR_SECTION

View file

@ -118,7 +118,7 @@ static void iowa_##name at \
#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
static struct ppc_pci_io __initdata iowa_pci_io = {
static const struct ppc_pci_io __devinitconst iowa_pci_io = {
#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
@ -146,7 +146,7 @@ static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
}
/* Register new bus to support workaround */
void __init iowa_register_bus(struct pci_controller *phb,
void __devinit iowa_register_bus(struct pci_controller *phb,
struct ppc_pci_io *ops,
int (*initfunc)(struct iowa_bus *, void *), void *data)
{
@ -173,7 +173,7 @@ void __init iowa_register_bus(struct pci_controller *phb,
}
/* enable IO workaround */
void __init io_workaround_init(void)
void __devinit io_workaround_init(void)
{
static int io_workaround_inited;

View file

@ -31,9 +31,9 @@ struct iowa_bus {
void *private;
};
void __init io_workaround_init(void);
void __init iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
int (*)(struct iowa_bus *, void *), void *);
void __devinit io_workaround_init(void);
void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
int (*)(struct iowa_bus *, void *), void *);
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
struct iowa_bus *iowa_pio_find_bus(unsigned long);

View file

@ -32,6 +32,7 @@
#include <linux/marker.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/spu_info.h>
#include <asm/uaccess.h>

View file

@ -659,7 +659,7 @@ static struct spu *find_victim(struct spu_context *ctx)
victim->stats.invol_ctx_switch++;
spu->stats.invol_ctx_switch++;
if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
spu_add_to_rq(victim);
mutex_unlock(&victim->state_mutex);

View file

@ -71,7 +71,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
size_t user_len, loff_t * offset);
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t* debug_info_create(char *name, int pages_per_area,
static debug_info_t *debug_info_create(const char *name, int pages_per_area,
int nr_areas, int buf_size, mode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
@ -234,8 +234,8 @@ fail_malloc_areas:
*/
static debug_info_t*
debug_info_alloc(char *name, int pages_per_area, int nr_areas, int buf_size,
int level, int mode)
debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
int buf_size, int level, int mode)
{
debug_info_t* rc;
@ -326,8 +326,8 @@ debug_info_free(debug_info_t* db_info){
*/
static debug_info_t*
debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size,
mode_t mode)
debug_info_create(const char *name, int pages_per_area, int nr_areas,
int buf_size, mode_t mode)
{
debug_info_t* rc;
@ -684,9 +684,9 @@ debug_close(struct inode *inode, struct file *file)
* - Returns handle for debug area
*/
debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas,
int buf_size, mode_t mode, uid_t uid,
gid_t gid)
debug_info_t *debug_register_mode(const char *name, int pages_per_area,
int nr_areas, int buf_size, mode_t mode,
uid_t uid, gid_t gid)
{
debug_info_t *rc = NULL;
@ -722,8 +722,8 @@ EXPORT_SYMBOL(debug_register_mode);
* - returns handle for debug area
*/
debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas,
int buf_size)
debug_info_t *debug_register(const char *name, int pages_per_area,
int nr_areas, int buf_size)
{
return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
S_IRUSR | S_IWUSR, 0, 0);

View file

@ -25,6 +25,7 @@ int show_interrupts(struct seq_file *p, void *v)
static const char *intrclass_names[] = { "EXT", "I/O", };
int i = *(loff_t *) v, j;
get_online_cpus();
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(j)
@ -43,7 +44,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
}
put_online_cpus();
return 0;
}

View file

@ -139,7 +139,6 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
if (wait)
data.finished = CPU_MASK_NONE;
spin_lock(&call_lock);
call_data = &data;
for_each_cpu_mask(cpu, map)
@ -151,7 +150,6 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
if (wait)
while (!cpus_equal(map, data.finished))
cpu_relax();
spin_unlock(&call_lock);
out:
if (local) {
local_irq_disable();
@ -177,11 +175,11 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
{
cpumask_t map;
preempt_disable();
spin_lock(&call_lock);
map = cpu_online_map;
cpu_clear(smp_processor_id(), map);
__smp_call_function_map(func, info, nonatomic, wait, map);
preempt_enable();
spin_unlock(&call_lock);
return 0;
}
EXPORT_SYMBOL(smp_call_function);
@ -202,10 +200,10 @@ EXPORT_SYMBOL(smp_call_function);
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int nonatomic, int wait)
{
preempt_disable();
spin_lock(&call_lock);
__smp_call_function_map(func, info, nonatomic, wait,
cpumask_of_cpu(cpu));
preempt_enable();
spin_unlock(&call_lock);
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
@ -228,10 +226,10 @@ EXPORT_SYMBOL(smp_call_function_single);
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
int wait)
{
preempt_disable();
spin_lock(&call_lock);
cpu_clear(smp_processor_id(), mask);
__smp_call_function_map(func, info, 0, wait, mask);
preempt_enable();
spin_unlock(&call_lock);
return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
@ -592,7 +590,9 @@ int __cpuinit start_secondary(void *cpuvoid)
pfault_init();
/* Mark this cpu as online */
spin_lock(&call_lock);
cpu_set(smp_processor_id(), cpu_online_map);
spin_unlock(&call_lock);
/* Switch on interrupts */
local_irq_enable();
/* Print info about this processor */

View file

@ -27,19 +27,12 @@ struct memory_segment {
static LIST_HEAD(mem_segs);
static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
static inline pud_t *vmem_pud_alloc(void)
static pud_t *vmem_pud_alloc(void)
{
pud_t *pud = NULL;
#ifdef CONFIG_64BIT
pud = vmem_alloc_pages(2);
pud = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
if (!pud)
return NULL;
clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
@ -47,12 +40,12 @@ static inline pud_t *vmem_pud_alloc(void)
return pud;
}
static inline pmd_t *vmem_pmd_alloc(void)
static pmd_t *vmem_pmd_alloc(void)
{
pmd_t *pmd = NULL;
#ifdef CONFIG_64BIT
pmd = vmem_alloc_pages(2);
pmd = vmemmap_alloc_block(PAGE_SIZE * 4, 0);
if (!pmd)
return NULL;
clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
@ -60,7 +53,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
static pte_t __init_refok *vmem_pte_alloc(void)
static pte_t __ref *vmem_pte_alloc(void)
{
pte_t *pte;
@ -214,7 +207,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
if (pte_none(*pt_dir)) {
unsigned long new_page;
new_page =__pa(vmem_alloc_pages(0));
new_page =__pa(vmemmap_alloc_block(PAGE_SIZE, 0));
if (!new_page)
goto out;
pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);

View file

@ -330,14 +330,6 @@ static char *ebin_to_mem(const char *buf, char *mem, int count)
return mem;
}
/* Pack a hex byte */
static char *pack_hex_byte(char *pkt, int byte)
{
*pkt++ = hexchars[(byte >> 4) & 0xf];
*pkt++ = hexchars[(byte & 0xf)];
return pkt;
}
/* Scan for the start char '$', read the packet and check the checksum */
static void get_packet(char *buffer, int buflen)
{

View file

@ -219,7 +219,7 @@ out:
return err;
}
int sparc_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
if (ARCH_SUN4C_SUN4 &&
(len > 0x20000000 ||
@ -295,52 +295,14 @@ asmlinkage unsigned long sparc_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
{
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
if (ARCH_SUN4C_SUN4) {
if (old_len > 0x20000000 || new_len > 0x20000000)
goto out;
if (addr < 0xe0000000 && addr + old_len > 0x20000000)
goto out;
}
if (old_len > TASK_SIZE - PAGE_SIZE ||
new_len > TASK_SIZE - PAGE_SIZE)
if (unlikely(sparc_mmap_check(addr, old_len)))
goto out;
if (unlikely(sparc_mmap_check(new_addr, new_len)))
goto out;
down_write(&current->mm->mmap_sem);
if (flags & MREMAP_FIXED) {
if (ARCH_SUN4C_SUN4 &&
new_addr < 0xe0000000 &&
new_addr + new_len > 0x20000000)
goto out_sem;
if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
goto out_sem;
} else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
addr + new_len > 0x20000000) ||
addr + new_len > TASK_SIZE - PAGE_SIZE) {
unsigned long map_flags = 0;
struct file *file = NULL;
ret = -ENOMEM;
if (!(flags & MREMAP_MAYMOVE))
goto out_sem;
vma = find_vma(current->mm, addr);
if (vma) {
if (vma->vm_flags & VM_SHARED)
map_flags |= MAP_SHARED;
file = vma->vm_file;
}
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
map_flags);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
flags |= MREMAP_FIXED;
}
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
up_write(&current->mm->mmap_sem);
out:
return ret;

View file

@ -46,7 +46,7 @@ __handle_user_windows:
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
1: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
1: andcc %l0, _TIF_SIGPENDING, %g0
be,pt %xcc, __handle_user_windows_continue
nop
mov %l5, %o1
@ -86,7 +86,7 @@ __handle_perfctrs:
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
1: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
1: andcc %l0, _TIF_SIGPENDING, %g0
be,pt %xcc, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
@ -195,7 +195,7 @@ __handle_preemption_continue:
andcc %l1, %o0, %g0
andcc %l0, _TIF_NEED_RESCHED, %g0
bne,pn %xcc, __handle_preemption
andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
andcc %l0, _TIF_SIGPENDING, %g0
bne,pn %xcc, __handle_signal
__handle_signal_continue:
ldub [%g6 + TI_WSAVED], %o2

View file

@ -247,7 +247,9 @@ static long _sigpause_common(old_sigset_t set)
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
set_restore_sigmask();
return -ERESTARTNOHAND;
}
@ -537,7 +539,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
} else
restart_syscall = 0;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
@ -566,13 +568,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
syscall_restart(orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset, regs);
/* a signal was successfully delivered; the saved
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
* clear the TS_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
return;
}
if (restart_syscall &&
@ -591,17 +592,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tnpc -= 4;
}
/* if there's no signal to deliver, we just put the saved sigmask
/* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs, orig_i0);
}

View file

@ -788,13 +788,12 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
syscall_restart32(orig_i0, regs, &ka.sa);
handle_signal32(signr, &ka, &info, oldset, regs);
/* a signal was successfully delivered; the saved
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
* clear the TS_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
return;
}
if (restart_syscall &&
@ -813,11 +812,11 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
regs->tnpc -= 4;
}
/* if there's no signal to deliver, we just put the saved sigmask
/* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}

View file

@ -542,8 +542,7 @@ asmlinkage long sparc64_personality(unsigned long personality)
return ret;
}
int sparc64_mmap_check(unsigned long addr, unsigned long len,
unsigned long flags)
int sparc64_mmap_check(unsigned long addr, unsigned long len)
{
if (test_thread_flag(TIF_32BIT)) {
if (len >= STACK_TOP32)
@ -609,46 +608,19 @@ asmlinkage unsigned long sys64_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
{
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
if (test_thread_flag(TIF_32BIT))
goto out;
if (unlikely(new_len >= VA_EXCLUDE_START))
goto out;
if (unlikely(invalid_64bit_range(addr, old_len)))
if (unlikely(sparc64_mmap_check(addr, old_len)))
goto out;
if (unlikely(sparc64_mmap_check(new_addr, new_len)))
goto out;
down_write(&current->mm->mmap_sem);
if (flags & MREMAP_FIXED) {
if (invalid_64bit_range(new_addr, new_len))
goto out_sem;
} else if (invalid_64bit_range(addr, new_len)) {
unsigned long map_flags = 0;
struct file *file = NULL;
ret = -ENOMEM;
if (!(flags & MREMAP_MAYMOVE))
goto out_sem;
vma = find_vma(current->mm, addr);
if (vma) {
if (vma->vm_flags & VM_SHARED)
map_flags |= MAP_SHARED;
file = vma->vm_file;
}
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
map_flags);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
flags |= MREMAP_FIXED;
}
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
up_write(&current->mm->mmap_sem);
out:
return ret;

View file

@ -867,44 +867,15 @@ asmlinkage unsigned long sys32_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, u32 __new_addr)
{
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
unsigned long new_addr = __new_addr;
if (old_len > STACK_TOP32 || new_len > STACK_TOP32)
if (unlikely(sparc64_mmap_check(addr, old_len)))
goto out;
if (addr > STACK_TOP32 - old_len)
if (unlikely(sparc64_mmap_check(new_addr, new_len)))
goto out;
down_write(&current->mm->mmap_sem);
if (flags & MREMAP_FIXED) {
if (new_addr > STACK_TOP32 - new_len)
goto out_sem;
} else if (addr > STACK_TOP32 - new_len) {
unsigned long map_flags = 0;
struct file *file = NULL;
ret = -ENOMEM;
if (!(flags & MREMAP_MAYMOVE))
goto out_sem;
vma = find_vma(current->mm, addr);
if (vma) {
if (vma->vm_flags & VM_SHARED)
map_flags |= MAP_SHARED;
file = vma->vm_file;
}
/* MREMAP_FIXED checked above. */
new_addr = get_unmapped_area(file, addr, new_len,
vma ? vma->vm_pgoff : 0,
map_flags);
ret = new_addr;
if (new_addr & ~PAGE_MASK)
goto out_sem;
flags |= MREMAP_FIXED;
}
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
up_write(&current->mm->mmap_sem);
out:
return ret;

View file

@ -145,14 +145,14 @@ config LEGACY_PTYS
systems, it is safe to say N.
config RAW_DRIVER
tristate "RAW driver (/dev/raw/rawN) (OBSOLETE)"
tristate "RAW driver (/dev/raw/rawN)"
depends on BLOCK
help
The raw driver permits block devices to be bound to /dev/raw/rawN.
Once bound, I/O against /dev/raw/rawN uses efficient zero-copy I/O.
See the raw(8) manpage for more details.
The raw driver is deprecated and will be removed soon.
Applications should simply open the device (eg /dev/hda1)
Applications should preferably open the device (eg /dev/hda1)
with the O_DIRECT flag.
config MAX_RAW_DEVS
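As an aside on the O_DIRECT alternative recommended in the help text above, here is a minimal userspace sketch; the device path, buffer size and 512-byte alignment are illustrative assumptions, since O_DIRECT requires the buffer, file offset and transfer length to be aligned to the device's logical block size:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd;

	/* Open the block device directly; I/O bypasses the page cache. */
	fd = open("/dev/hda1", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;

	/* O_DIRECT needs an aligned buffer and an aligned transfer size. */
	if (posix_memalign(&buf, 512, 4096)) {
		close(fd);
		return 1;
	}

	if (read(fd, buf, 4096) < 0)
		perror("read");

	free(buf);
	close(fd);
	return 0;
}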

View file

@ -11,6 +11,7 @@
#include <termios.h>
#include <sys/ioctl.h>
#include "chan_user.h"
#include "kern_constants.h"
#include "os.h"
#include "um_malloc.h"
#include "user.h"

View file

@ -8,7 +8,7 @@
static inline void *cow_malloc(int size)
{
return kmalloc(size, UM_GFP_KERNEL);
return uml_kmalloc(size, UM_GFP_KERNEL);
}
static inline void cow_free(void *ptr)

View file

@ -34,7 +34,7 @@ static struct sockaddr_un *new_addr(void *name, int len)
{
struct sockaddr_un *sun;
sun = kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
if (sun == NULL) {
printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un "
"failed\n");
@ -83,7 +83,7 @@ static int connect_to_switch(struct daemon_data *pri)
goto out_close;
}
sun = kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
sun = uml_kmalloc(sizeof(struct sockaddr_un), UM_GFP_KERNEL);
if (sun == NULL) {
printk(UM_KERN_ERR "new_addr: allocation of sockaddr_un "
"failed\n");

View file

@ -40,7 +40,7 @@ static void *fd_init(char *str, int device, const struct chan_opts *opts)
return NULL;
}
data = kmalloc(sizeof(*data), UM_GFP_KERNEL);
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;

View file

@ -154,7 +154,7 @@ static int hostaudio_ioctl(struct inode *inode, struct file *file,
case SNDCTL_DSP_SUBDIVIDE:
case SNDCTL_DSP_SETFRAGMENT:
if (get_user(data, (int __user *) arg))
return EFAULT;
return -EFAULT;
break;
default:
break;

View file

@ -15,6 +15,7 @@
#include <unistd.h>
#include <errno.h>
#include <netinet/in.h>
#include "kern_constants.h"
#include "mcast.h"
#include "net_user.h"
#include "um_malloc.h"
@ -24,7 +25,7 @@ static struct sockaddr_in *new_addr(char *addr, unsigned short port)
{
struct sockaddr_in *sin;
sin = kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
sin = uml_kmalloc(sizeof(struct sockaddr_in), UM_GFP_KERNEL);
if (sin == NULL) {
printk(UM_KERN_ERR "new_addr: allocation of sockaddr_in "
"failed\n");

View file

@ -222,7 +222,7 @@ static void change(char *dev, char *what, unsigned char *addr,
netmask[2], netmask[3]);
output_len = UM_KERN_PAGE_SIZE;
output = kmalloc(output_len, UM_GFP_KERNEL);
output = uml_kmalloc(output_len, UM_GFP_KERNEL);
if (output == NULL)
printk(UM_KERN_ERR "change : failed to allocate output "
"buffer\n");

View file

@ -47,7 +47,7 @@ static void *port_init(char *str, int device, const struct chan_opts *opts)
if (kern_data == NULL)
return NULL;
data = kmalloc(sizeof(*data), UM_GFP_KERNEL);
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
goto err;

View file

@ -29,7 +29,7 @@ static void *pty_chan_init(char *str, int device, const struct chan_opts *opts)
{
struct pty_chan *data;
data = kmalloc(sizeof(*data), UM_GFP_KERNEL);
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;

View file

@ -1,4 +1,5 @@
/* Copyright (C) 2005 Jeff Dike <jdike@addtoit.com> */
/* Copyright (C) 2005 - 2008 Jeff Dike <jdike@{linux.intel,addtoit}.com> */
/* Much of this ripped from drivers/char/hw_random.c, see there for other
* copyright.
*
@ -8,16 +9,18 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include "irq_kern.h"
#include "os.h"
/*
* core module and version information
*/
#define RNG_VERSION "1.0.0"
#define RNG_MODULE_NAME "random"
#define RNG_MODULE_NAME "hw_random"
#define RNG_MISCDEV_MINOR 183 /* official */
@ -26,47 +29,67 @@
* protects against a module being loaded twice at the same time.
*/
static int random_fd = -1;
static DECLARE_WAIT_QUEUE_HEAD(host_read_wait);
static int rng_dev_open (struct inode *inode, struct file *filp)
{
/* enforce read-only access to this chrdev */
if ((filp->f_mode & FMODE_READ) == 0)
return -EINVAL;
if (filp->f_mode & FMODE_WRITE)
if ((filp->f_mode & FMODE_WRITE) != 0)
return -EINVAL;
return 0;
}
static atomic_t host_sleep_count = ATOMIC_INIT(0);
static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
loff_t * offp)
loff_t *offp)
{
u32 data;
int n, ret = 0, have_data;
u32 data;
int n, ret = 0, have_data;
while(size){
n = os_read_file(random_fd, &data, sizeof(data));
if(n > 0){
have_data = n;
while (have_data && size) {
if (put_user((u8)data, buf++)) {
ret = ret ? : -EFAULT;
break;
}
size--;
ret++;
have_data--;
data>>=8;
}
}
else if(n == -EAGAIN){
if (filp->f_flags & O_NONBLOCK)
return ret ? : -EAGAIN;
while (size) {
n = os_read_file(random_fd, &data, sizeof(data));
if (n > 0) {
have_data = n;
while (have_data && size) {
if (put_user((u8) data, buf++)) {
ret = ret ? : -EFAULT;
break;
}
size--;
ret++;
have_data--;
data >>= 8;
}
}
else if (n == -EAGAIN) {
DECLARE_WAITQUEUE(wait, current);
if (filp->f_flags & O_NONBLOCK)
return ret ? : -EAGAIN;
atomic_inc(&host_sleep_count);
reactivate_fd(random_fd, RANDOM_IRQ);
add_sigio_fd(random_fd);
add_wait_queue(&host_read_wait, &wait);
set_task_state(current, TASK_INTERRUPTIBLE);
schedule();
set_task_state(current, TASK_RUNNING);
remove_wait_queue(&host_read_wait, &wait);
if (atomic_dec_and_test(&host_sleep_count)) {
ignore_sigio_fd(random_fd);
deactivate_fd(random_fd, RANDOM_IRQ);
}
}
else
return n;
if(need_resched())
schedule_timeout_interruptible(1);
}
else return n;
if (signal_pending (current))
return ret ? : -ERESTARTSYS;
}
@ -86,6 +109,13 @@ static struct miscdevice rng_miscdev = {
&rng_chrdev_ops,
};
static irqreturn_t random_interrupt(int irq, void *data)
{
wake_up(&host_read_wait);
return IRQ_HANDLED;
}
/*
* rng_init - initialize RNG module
*/
@ -93,28 +123,33 @@ static int __init rng_init (void)
{
int err;
err = os_open_file("/dev/random", of_read(OPENFLAGS()), 0);
if(err < 0)
goto out;
err = os_open_file("/dev/random", of_read(OPENFLAGS()), 0);
if (err < 0)
goto out;
random_fd = err;
random_fd = err;
err = os_set_fd_block(random_fd, 0);
if(err)
err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "random",
NULL);
if (err)
goto err_out_cleanup_hw;
sigio_broken(random_fd, 1);
err = misc_register (&rng_miscdev);
if (err) {
printk (KERN_ERR RNG_MODULE_NAME ": misc device register failed\n");
printk (KERN_ERR RNG_MODULE_NAME ": misc device register "
"failed\n");
goto err_out_cleanup_hw;
}
out:
return err;
out:
return err;
err_out_cleanup_hw:
random_fd = -1;
goto out;
err_out_cleanup_hw:
os_close_file(random_fd);
random_fd = -1;
goto out;
}
/*
@ -122,6 +157,7 @@ static int __init rng_init (void)
*/
static void __exit rng_cleanup (void)
{
os_close_file(random_fd);
misc_deregister (&rng_miscdev);
}

View file

@ -96,7 +96,7 @@ static int slip_tramp(char **argv, int fd)
pid = err;
output_len = UM_KERN_PAGE_SIZE;
output = kmalloc(output_len, UM_GFP_KERNEL);
output = uml_kmalloc(output_len, UM_GFP_KERNEL);
if (output == NULL) {
printk(UM_KERN_ERR "slip_tramp : failed to allocate output "
"buffer\n");

View file

@ -29,7 +29,7 @@ static void *tty_chan_init(char *str, int device, const struct chan_opts *opts)
}
str++;
data = kmalloc(sizeof(*data), UM_GFP_KERNEL);
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct tty_chan) { .dev = str,

View file

@ -1178,8 +1178,8 @@ static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask,
* by one word. Thanks to Lynn Kerby for the fix and James McMechan
* for the original diagnosis.
*/
if(*cow_offset == ((bitmap_len + sizeof(unsigned long) - 1) /
sizeof(unsigned long) - 1))
if (*cow_offset == (DIV_ROUND_UP(bitmap_len,
sizeof(unsigned long)) - 1))
(*cow_offset)--;
bitmap_words[0] = bitmap[*cow_offset];
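A note on the conversion just above: in the kernel, DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so the new expression computes the same last-word index as the open-coded rounding it replaces. A standalone sketch of that equivalence (the bitmap length is an arbitrary illustrative value):

#include <assert.h>
#include <stddef.h>

/* Same definition as DIV_ROUND_UP in include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	size_t bitmap_len = 1000;	/* illustrative length in bytes */

	/* Open-coded rounding used before the patch... */
	size_t old_idx = (bitmap_len + sizeof(unsigned long) - 1) /
		sizeof(unsigned long) - 1;
	/* ...and the DIV_ROUND_UP form used after it. */
	size_t new_idx = DIV_ROUND_UP(bitmap_len, sizeof(unsigned long)) - 1;

	assert(old_idx == new_idx);
	return 0;
}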

View file

@ -30,7 +30,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts)
{
struct xterm_chan *data;
data = kmalloc(sizeof(*data), UM_GFP_KERNEL);
data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL);
if (data == NULL)
return NULL;
*data = ((struct xterm_chan) { .pid = -1,

View file

@ -23,16 +23,16 @@
*/
#ifdef __ASSEMBLY__
#define _AC(X, Y) (Y)
#define _UML_AC(X, Y) (Y)
#else
#define __AC(X, Y) (X (Y))
#define _AC(X, Y) __AC(X, Y)
#define __UML_AC(X, Y) (X(Y))
#define _UML_AC(X, Y) __UML_AC(X, Y)
#endif
#define STUB_START _AC(, 0x100000)
#define STUB_CODE _AC((unsigned long), STUB_START)
#define STUB_DATA _AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
#define STUB_END _AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
#define STUB_START _UML_AC(, 0x100000)
#define STUB_CODE _UML_AC((unsigned long), STUB_START)
#define STUB_DATA _UML_AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
#define STUB_END _UML_AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
#ifndef __ASSEMBLY__

View file

@ -58,11 +58,11 @@ struct line {
};
#define LINE_INIT(str, d) \
{ .count_lock = SPIN_LOCK_UNLOCKED, \
{ .count_lock = __SPIN_LOCK_UNLOCKED((str).count_lock), \
.init_str = str, \
.init_pri = INIT_STATIC, \
.valid = 1, \
.lock = SPIN_LOCK_UNLOCKED, \
.lock = __SPIN_LOCK_UNLOCKED((str).lock), \
.driver = d }
extern void line_close(struct tty_struct *tty, struct file * filp);

View file

@ -290,6 +290,7 @@ extern void os_set_ioignore(void);
extern int add_sigio_fd(int fd);
extern int ignore_sigio_fd(int fd);
extern void maybe_sigio_broken(int fd, int read);
extern void sigio_broken(int fd, int read);
/* sys-x86_64/prctl.c */
extern int os_arch_prctl(int pid, int code, unsigned long *addr);

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@ -8,18 +8,10 @@
#include <signal.h>
extern void sig_handler(int sig, struct sigcontext sc);
extern void alarm_handler(int sig, struct sigcontext sc);
/* Copied from linux/compiler-gcc.h since we can't include it directly */
#define barrier() __asm__ __volatile__("": : :"memory")
extern void sig_handler(int sig, struct sigcontext *sc);
extern void alarm_handler(int sig, struct sigcontext *sc);
#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@ -12,14 +12,3 @@
#include "sysdep/skas_ptrace.h"
#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/

View file

@ -41,38 +41,10 @@
#define PT_SP_OFFSET PT_OFFSET(UESP)
#define PT_SP(regs) ((regs)[UESP])
#define FP_SIZE ((HOST_XFP_SIZE > HOST_FP_SIZE) ? HOST_XFP_SIZE : HOST_FP_SIZE)
#define FP_SIZE ((HOST_FPX_SIZE > HOST_FP_SIZE) ? HOST_FPX_SIZE : HOST_FP_SIZE)
#ifndef FRAME_SIZE
#define FRAME_SIZE (17)
#endif
#define FRAME_SIZE_OFFSET (FRAME_SIZE * sizeof(unsigned long))
#define FP_FRAME_SIZE (27)
#define FPX_FRAME_SIZE (128)
#ifdef PTRACE_GETREGS
#define UM_HAVE_GETREGS
#endif
#ifdef PTRACE_SETREGS
#define UM_HAVE_SETREGS
#endif
#ifdef PTRACE_GETFPREGS
#define UM_HAVE_GETFPREGS
#endif
#ifdef PTRACE_SETFPREGS
#define UM_HAVE_SETFPREGS
#endif
#ifdef PTRACE_GETFPXREGS
#define UM_HAVE_GETFPXREGS
#endif
#ifdef PTRACE_SETFPXREGS
#define UM_HAVE_SETFPXREGS
#endif
#endif

View file

@ -10,7 +10,7 @@
#define IP_RESTART_SYSCALL(ip) ((ip) -= 2)
#define GET_FAULTINFO_FROM_SC(fi,sc) \
#define GET_FAULTINFO_FROM_SC(fi, sc) \
{ \
(fi).cr2 = SC_CR2(sc); \
(fi).error_code = SC_ERR(sc); \

View file

@ -48,7 +48,8 @@
#define PT_ORIG_RAX_OFFSET (ORIG_RAX)
#define PT_ORIG_RAX(regs) ((regs)[PT_INDEX(ORIG_RAX)])
/* x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
/*
* x86_64 FC3 doesn't define this in /usr/include/linux/ptrace.h even though
* it's defined in the kernel's include/linux/ptrace.h. Additionally, use the
* 2.4 name and value for 2.4 host compatibility.
*/
@ -56,7 +57,8 @@
#define PTRACE_OLDSETOPTIONS 21
#endif
/* These are before the system call, so the system call number is RAX
/*
* These are before the system call, so the system call number is RAX
* rather than ORIG_RAX, and arg4 is R10 rather than RCX
*/
#define REGS_SYSCALL_NR PT_INDEX(RAX)
@ -73,14 +75,3 @@
#define FP_SIZE (HOST_FP_SIZE)
#endif
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically
* adjust the settings for this buffer only. This must remain at the end
* of the file.
* ---------------------------------------------------------------------------
* Local variables:
* c-file-style: "linux"
* End:
*/

View file

@ -8,15 +8,12 @@
#include "kern_constants.h"
extern void *__kmalloc(int size, int flags);
static inline void *kmalloc(int size, int flags)
{
return __kmalloc(size, flags);
}
extern void *uml_kmalloc(int size, int flags);
extern void kfree(const void *ptr);
extern void *vmalloc(unsigned long size);
extern void vfree(void *ptr);
#endif /* __UM_MALLOC_H__ */

View file

@ -1,4 +1,5 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
OUTPUT_ARCH(ELF_ARCH)
@ -21,7 +22,7 @@ SECTIONS
_einittext = .;
}
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
/* Read-only sections, merged into text segment: */
.hash : { *(.hash) }
@ -68,9 +69,9 @@ SECTIONS
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
} =0x90909090
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.syscall_stub : {
__syscall_stub_start = .;
*(.__syscall_stub*)

View file

@ -375,3 +375,8 @@ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
return pmd;
}
#endif
void *uml_kmalloc(int size, int flags)
{
return kmalloc(size, flags);
}

View file

@ -75,7 +75,7 @@ static irqreturn_t um_timer(int irq, void *dev)
static cycle_t itimer_read(void)
{
return os_nsecs();
return os_nsecs() / 1000;
}
static struct clocksource itimer_clocksource = {
@ -83,7 +83,7 @@ static struct clocksource itimer_clocksource = {
.rating = 300,
.read = itimer_read,
.mask = CLOCKSOURCE_MASK(64),
.mult = 1,
.mult = 1000,
.shift = 0,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
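For context on the mult/shift change above: the clocksource framework converts a cycle delta to nanoseconds as (cycles * mult) >> shift, so having itimer_read() report microseconds and scaling by mult = 1000, shift = 0 yields the same nanosecond result as before. A small sketch of that conversion (the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* The conversion the clocksource framework applies to a cycle delta. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint64_t usecs = 123456;	/* what the patched itimer_read() might report */

	/* mult = 1000, shift = 0: microsecond ticks scaled back to nanoseconds. */
	printf("%llu ns\n", (unsigned long long)cyc2ns(usecs, 1000, 0));
	return 0;
}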

View file

@ -150,7 +150,7 @@ __uml_setup("root=", uml_root_setup,
static int __init no_skas_debug_setup(char *line, int *add)
{
printf("'debug' is not necessary to gdb UML in skas mode - run \n");
printf("'gdb linux'");
printf("'gdb linux'\n");
return 0;
}
@ -258,6 +258,7 @@ int __init linux_main(int argc, char **argv)
{
unsigned long avail, diff;
unsigned long virtmem_size, max_physmem;
unsigned long stack;
unsigned int i;
int add;
char * mode;
@ -348,7 +349,9 @@ int __init linux_main(int argc, char **argv)
}
virtmem_size = physmem_size;
avail = TASK_SIZE - start_vm;
stack = (unsigned long) argv;
stack &= ~(1024 * 1024 - 1);
avail = stack - start_vm;
if (physmem_size > avail)
virtmem_size = avail;
end_vm = start_vm + virtmem_size;

View file

@ -1,4 +1,5 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
OUTPUT_ARCH(ELF_ARCH)
@ -26,7 +27,7 @@ SECTIONS
INIT_TEXT
_einittext = .;
}
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.text :
{
@ -39,7 +40,7 @@ SECTIONS
*(.gnu.linkonce.t*)
}
. = ALIGN(4096);
. = ALIGN(PAGE_SIZE);
.syscall_stub : {
__syscall_stub_start = .;
*(.__syscall_stub*)
@ -79,7 +80,7 @@ SECTIONS
.sdata : { *(.sdata) }
_edata = .;
PROVIDE (edata = .);
. = ALIGN(0x1000);
. = ALIGN(PAGE_SIZE);
.sbss :
{
__bss_start = .;

View file

@ -52,7 +52,7 @@ static void etap_change(int op, unsigned char *addr, unsigned char *netmask,
return;
}
output = kmalloc(UM_KERN_PAGE_SIZE, UM_GFP_KERNEL);
output = uml_kmalloc(UM_KERN_PAGE_SIZE, UM_GFP_KERNEL);
if (output == NULL)
printk(UM_KERN_ERR "etap_change : Failed to allocate output "
"buffer\n");
@ -165,7 +165,7 @@ static int etap_open(void *data)
err = etap_tramp(pri->dev_name, pri->gate_addr, control_fds[0],
control_fds[1], data_fds[0], data_fds[1]);
output_len = UM_KERN_PAGE_SIZE;
output = kmalloc(output_len, UM_GFP_KERNEL);
output = uml_kmalloc(output_len, UM_GFP_KERNEL);
read_output(control_fds[0], output, output_len);
if (output == NULL)

View file

@ -71,8 +71,8 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
data.pre_data = pre_data;
data.argv = argv;
data.fd = fds[1];
data.buf = __cant_sleep() ? kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
kmalloc(PATH_MAX, UM_GFP_KERNEL);
data.buf = __cant_sleep() ? uml_kmalloc(PATH_MAX, UM_GFP_ATOMIC) :
uml_kmalloc(PATH_MAX, UM_GFP_KERNEL);
pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
if (pid < 0) {
ret = -errno;

View file

@ -199,7 +199,7 @@ void *__wrap_malloc(int size)
return __real_malloc(size);
else if (size <= UM_KERN_PAGE_SIZE)
/* finding contiguous pages can be hard*/
ret = kmalloc(size, UM_GFP_KERNEL);
ret = uml_kmalloc(size, UM_GFP_KERNEL);
else ret = vmalloc(size);
/*

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright (C) 2002 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@ -15,6 +15,7 @@
#include "kern_util.h"
#include "init.h"
#include "os.h"
#include "process.h"
#include "sigio.h"
#include "um_malloc.h"
#include "user.h"
@ -109,7 +110,7 @@ static int need_poll(struct pollfds *polls, int n)
if (n <= polls->size)
return 0;
new = kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
new = uml_kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
if (new == NULL) {
printk(UM_KERN_ERR "need_poll : failed to allocate new "
"pollfds\n");
@ -243,7 +244,7 @@ static struct pollfd *setup_initial_poll(int fd)
{
struct pollfd *p;
p = kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
p = uml_kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
if (p == NULL) {
printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
"poll\n");
@ -338,20 +339,10 @@ out_close1:
close(l_write_sigio_fds[1]);
}
/* Changed during early boot */
static int pty_output_sigio = 0;
static int pty_close_sigio = 0;
void maybe_sigio_broken(int fd, int read)
void sigio_broken(int fd, int read)
{
int err;
if (!isatty(fd))
return;
if ((read || pty_output_sigio) && (!read || pty_close_sigio))
return;
write_sigio_workaround();
sigio_lock();
@ -370,6 +361,21 @@ out:
sigio_unlock();
}
/* Changed during early boot */
static int pty_output_sigio;
static int pty_close_sigio;
void maybe_sigio_broken(int fd, int read)
{
if (!isatty(fd))
return;
if ((read || pty_output_sigio) && (!read || pty_close_sigio))
return;
sigio_broken(fd, read);
}
static void sigio_cleanup(void)
{
if (write_sigio_pid == -1)
@ -383,7 +389,7 @@ static void sigio_cleanup(void)
__uml_exitcall(sigio_cleanup);
/* Used as a flag during SIGIO testing early in boot */
static volatile int got_sigio = 0;
static int got_sigio;
static void __init handler(int sig)
{
@ -498,7 +504,8 @@ static void tty_output(int master, int slave)
if (errno != EAGAIN)
printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
errno);
while (((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio)
while (((n = read(slave, buf, sizeof(buf))) > 0) &&
!({ barrier(); got_sigio; }))
;
if (got_sigio) {

View file

@ -12,6 +12,7 @@
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "process.h"
#include "sysdep/barrier.h"
#include "sysdep/sigcontext.h"
#include "user.h"

View file

@ -442,7 +442,7 @@ void userspace(struct uml_pt_regs *regs)
unblock_signals();
break;
default:
printk(UM_KERN_ERR "userspace - child stopped "
printk(UM_KERN_ERR "userspace - child stopped "
"with signal %d\n", sig);
fatal_sigsegv();
}

View file

@ -23,6 +23,7 @@
#include "mem_user.h"
#include "ptrace_user.h"
#include "registers.h"
#include "skas.h"
#include "skas_ptrace.h"
static void ptrace_child(void)
@ -140,14 +141,27 @@ static int stop_ptraced_child(int pid, int exitcode, int mustexit)
}
/* Changed only during early boot */
int ptrace_faultinfo = 1;
int ptrace_ldt = 1;
int proc_mm = 1;
int skas_needs_stub = 0;
int ptrace_faultinfo;
static int disable_ptrace_faultinfo;
int ptrace_ldt;
static int disable_ptrace_ldt;
int proc_mm;
static int disable_proc_mm;
int have_switch_mm;
static int disable_switch_mm;
int skas_needs_stub;
static int __init skas0_cmd_param(char *str, int* add)
{
ptrace_faultinfo = proc_mm = 0;
disable_ptrace_faultinfo = 1;
disable_ptrace_ldt = 1;
disable_proc_mm = 1;
disable_switch_mm = 1;
return 0;
}
@ -157,15 +171,12 @@ static int __init mode_skas0_cmd_param(char *str, int* add)
__attribute__((alias("skas0_cmd_param")));
__uml_setup("skas0", skas0_cmd_param,
"skas0\n"
" Disables SKAS3 usage, so that SKAS0 is used, unless \n"
" you specify mode=tt.\n\n");
"skas0\n"
" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used\n\n");
__uml_setup("mode=skas0", mode_skas0_cmd_param,
"mode=skas0\n"
" Disables SKAS3 usage, so that SKAS0 is used, unless you \n"
" specify mode=tt. Note that this was recently added - on \n"
" older kernels you must use simply \"skas0\".\n\n");
"mode=skas0\n"
" Disables SKAS3 and SKAS4 usage, so that SKAS0 is used.\n\n");
/* Changed only during early boot */
static int force_sysemu_disabled = 0;
@ -360,7 +371,7 @@ void __init os_early_checks(void)
static int __init noprocmm_cmd_param(char *str, int* add)
{
proc_mm = 0;
disable_proc_mm = 1;
return 0;
}
@ -372,7 +383,7 @@ __uml_setup("noprocmm", noprocmm_cmd_param,
static int __init noptracefaultinfo_cmd_param(char *str, int* add)
{
ptrace_faultinfo = 0;
disable_ptrace_faultinfo = 1;
return 0;
}
@ -384,7 +395,7 @@ __uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
static int __init noptraceldt_cmd_param(char *str, int* add)
{
ptrace_ldt = 0;
disable_ptrace_ldt = 1;
return 0;
}
@ -404,17 +415,15 @@ static inline void check_skas3_ptrace_faultinfo(void)
n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
if (n < 0) {
ptrace_faultinfo = 0;
if (errno == EIO)
non_fatal("not found\n");
else
perror("not found");
}
} else if (disable_ptrace_faultinfo)
non_fatal("found but disabled on command line\n");
else {
if (!ptrace_faultinfo)
non_fatal("found but disabled on command line\n");
else
non_fatal("found\n");
ptrace_faultinfo = 1;
non_fatal("found\n");
}
stop_ptraced_child(pid, 1, 1);
@ -437,38 +446,30 @@ static inline void check_skas3_ptrace_ldt(void)
if (n < 0) {
if (errno == EIO)
non_fatal("not found\n");
else {
perror("not found");
}
ptrace_ldt = 0;
}
else {
if (ptrace_ldt)
non_fatal("found\n");
else
non_fatal("found, but use is disabled\n");
perror("not found");
} else if (disable_ptrace_ldt)
non_fatal("found, but use is disabled\n");
else {
ptrace_ldt = 1;
non_fatal("found\n");
}
stop_ptraced_child(pid, 1, 1);
#else
/* PTRACE_LDT might be disabled via cmdline option.
* We want to override this, else we might use the stub
* without real need
*/
ptrace_ldt = 1;
#endif
}
static inline void check_skas3_proc_mm(void)
{
non_fatal(" - /proc/mm...");
if (access("/proc/mm", W_OK) < 0) {
proc_mm = 0;
if (access("/proc/mm", W_OK) < 0)
perror("not found");
}
else if (!proc_mm)
else if (disable_proc_mm)
non_fatal("found but disabled on command line\n");
else non_fatal("found\n");
else {
proc_mm = 1;
non_fatal("found\n");
}
}
void can_do_skas(void)

Some files were not shown because too many files have changed in this diff.