Merge branch 'linus' into x86/gart

Ingo Molnar 2008-06-16 11:27:18 +02:00
commit 33ee375b2e
258 changed files with 2740 additions and 1904 deletions

View file

@ -21,6 +21,11 @@ This driver is known to work with the following cards:
* SA E200
* SA E200i
* SA E500
* SA P212
* SA P410
* SA P410i
* SA P411
* SA P812
Detecting drive failures:
-------------------------

View file

@ -36,6 +36,7 @@ files, each with their own function.
local_cpus nearby CPU mask (cpumask, ro)
resource PCI resource host addresses (ascii, ro)
resource0..N PCI resource N, if present (binary, mmap)
resource0_wc..N_wc PCI WC map resource N, if prefetchable (binary, mmap)
rom PCI ROM resource, if present (binary, ro)
subsystem_device PCI subsystem device (ascii, ro)
subsystem_vendor PCI subsystem vendor (ascii, ro)
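The new resourceN_wc files let userspace mmap a prefetchable BAR with write-combining. A minimal userspace sketch of how such a file might be used follows; the device path and mapping length are placeholders chosen for illustration, not values from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* Hypothetical device path; substitute a real prefetchable BAR. */
    const char *path = "/sys/bus/pci/devices/0000:01:00.0/resource0_wc";
    int fd = open(path, O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Map the first page of BAR0 with write-combining semantics. */
    void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (bar == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }

    /* ... stream data through 'bar' (e.g. a framebuffer) ... */

    munmap(bar, 4096);
    close(fd);
    return 0;
}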

View file

@ -305,7 +305,7 @@ should not be manipulated by any other user.
A kset keeps its children in a standard kernel linked list. Kobjects point
back to their containing kset via their kset field. In almost all cases,
the kobjects belonging to a ket have that kset (or, strictly, its embedded
the kobjects belonging to a kset have that kset (or, strictly, its embedded
kobject) in their parent.
As a kset contains a kobject within it, it should always be dynamically

View file

@ -503,7 +503,7 @@ generate input device EV_KEY events.
In addition to the EV_KEY events, thinkpad-acpi may also issue EV_SW
events for switches:
SW_RADIO T60 and later hardare rfkill rocker switch
SW_RFKILL_ALL T60 and later hardare rfkill rocker switch
SW_TABLET_MODE Tablet ThinkPads HKEY events 0x5009 and 0x500A
Non hot-key ACPI HKEY event map:

View file

@ -228,21 +228,21 @@ ACPI BATTERY DRIVERS
P: Alexey Starikovskiy
M: astarikovskiy@suse.de
L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
W: http://www.lesswatts.org/projects/acpi/
S: Supported
ACPI EC DRIVER
P: Alexey Starikovskiy
M: astarikovskiy@suse.de
L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
W: http://www.lesswatts.org/projects/acpi/
S: Supported
ACPI FAN DRIVER
P: Len Brown
M: len.brown@intel.com
L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
W: http://www.lesswatts.org/projects/acpi/
S: Supported
ACPI PCI HOTPLUG DRIVER
@ -255,14 +255,14 @@ ACPI THERMAL DRIVER
P: Len Brown
M: len.brown@intel.com
L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
W: http://www.lesswatts.org/projects/acpi/
S: Supported
ACPI VIDEO DRIVER
P: Rui Zhang
M: rui.zhang@intel.com
L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
W: http://www.lesswatts.org/projects/acpi/
S: Supported
ACPI WMI DRIVER
@ -274,7 +274,7 @@ S: Maintained
AD1889 ALSA SOUND DRIVER
P: Kyle McMartin
M: kyle@parisc-linux.org
M: kyle@mcmartin.ca
P: Thibaut Varene
M: T-Bone@parisc-linux.org
W: http://wiki.parisc-linux.org/AD1889
@ -995,8 +995,8 @@ L: netdev@vger.kernel.org
S: Supported
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
P: Eliezer Tamir
M: eliezert@broadcom.com
P: Eilon Greenstein
M: eilong@broadcom.com
L: netdev@vger.kernel.org
S: Supported
@ -1202,6 +1202,7 @@ M: pj@sgi.com
M: menage@google.com
L: linux-kernel@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
S: Supported
CRAMFS FILESYSTEM
@ -1827,7 +1828,7 @@ S: Maintained
HARMONY SOUND DRIVER
P: Kyle McMartin
M: kyle@parisc-linux.org
M: kyle@mcmartin.ca
L: linux-parisc@vger.kernel.org
S: Maintained
@ -3120,7 +3121,7 @@ S: Maintained
PARISC ARCHITECTURE
P: Kyle McMartin
M: kyle@parisc-linux.org
M: kyle@mcmartin.ca
P: Matthew Wilcox
M: matthew@wil.cx
P: Grant Grundler
@ -3329,9 +3330,11 @@ L: video4linux-list@redhat.com
W: http://www.isely.net/pvrusb2/
S: Maintained
PXA2xx SUPPORT
P: Nicolas Pitre
M: nico@cam.org
PXA2xx/PXA3xx SUPPORT
P: Eric Miao
M: eric.miao@marvell.com
P: Russell King
M: linux@arm.linux.org.uk
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
@ -3658,13 +3661,6 @@ M: romieu@fr.zoreil.com
L: netdev@vger.kernel.org
S: Maintained
SIS 5513 IDE CONTROLLER DRIVER
P: Lionel Bouton
M: Lionel.Bouton@inet6.fr
W: http://inet6.dyn.dhs.org/sponsoring/sis5513/index.html
W: http://gyver.homeip.net/sis5513/index.html
S: Maintained
SIS 900/7016 FAST ETHERNET DRIVER
P: Daniele Venzano
M: venza@brownhat.org
@ -4030,7 +4026,7 @@ TULIP NETWORK DRIVERS
P: Grant Grundler
M: grundler@parisc-linux.org
P: Kyle McMartin
M: kyle@parisc-linux.org
M: kyle@mcmartin.ca
L: netdev@vger.kernel.org
S: Maintained

View file

@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 26
EXTRAVERSION = -rc5
NAME = Funky Weasel is Jiggy wit it
EXTRAVERSION = -rc6
NAME = Rotary Wombat
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

View file

@ -330,7 +330,7 @@ struct ssp_device *ssp_request(int port, const char *label)
mutex_unlock(&ssp_lock);
if (ssp->port_id != port)
if (&ssp->node == &ssp_list)
return NULL;
return ssp;

View file

@ -465,7 +465,6 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
printk(KERN_ERR
"ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
len, slit->header.length);
memset(numa_slit, 10, sizeof(numa_slit));
return;
}
slit_table = slit;
@ -574,8 +573,14 @@ void __init acpi_numa_arch_fixup(void)
printk(KERN_INFO "Number of memory chunks in system = %d\n",
num_node_memblks);
if (!slit_table)
if (!slit_table) {
for (i = 0; i < MAX_NUMNODES; i++)
for (j = 0; j < MAX_NUMNODES; j++)
node_distance(i, j) = i == j ? LOCAL_DISTANCE :
REMOTE_DISTANCE;
return;
}
memset(numa_slit, -1, sizeof(numa_slit));
for (i = 0; i < slit_table->locality_count; i++) {
if (!pxm_bit_test(i))

View file

@ -159,7 +159,8 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
if (p->u.ioreq.state == STATE_IORESP_READY) {
if (dir == IOREQ_READ)
*dest = p->u.ioreq.data;
/* it's necessary to ensure zero extending */
*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
} else
panic_vm(vcpu);
out:
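For clarity, a standalone sketch of the masking added above, assuming s is the MMIO access size in bytes (1, 2, 4 or 8) as in this handler. This is illustrative only, not code from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (unsigned int s = 1; s <= 8; s <<= 1) {
        uint64_t mask = ~0ULL >> (64 - s * 8);
        printf("s=%u mask=0x%016llx\n", s, (unsigned long long)mask);
    }
    /* Prints ff, ffff, ffffffff and ffffffffffffffff: ANDing the MMIO
     * result with the mask zero-extends an s-byte value into 64 bits. */
    return 0;
}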

View file

@ -111,7 +111,13 @@ void hw_timer_init(void)
__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
mcftmr_cycles_per_jiffy = FREQ / HZ;
__raw_writetrr(mcftmr_cycles_per_jiffy, TA(MCFTIMER_TRR));
/*
* The coldfire timer runs from 0 to TRR included, then 0
* again and so on. It counts thus actually TRR + 1 steps
* for 1 tick, not TRR. So if you want n cycles,
* initialize TRR with n - 1.
*/
__raw_writetrr(mcftmr_cycles_per_jiffy - 1, TA(MCFTIMER_TRR));
__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
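A worked example of the off-by-one described in the new comment; the clock rate and HZ values here are assumptions for illustration, not taken from the patch.

#include <stdio.h>

int main(void)
{
    unsigned long freq = 1000000;                  /* assumed 1 MHz timer clock */
    unsigned long hz = 100;                        /* assumed tick rate */
    unsigned long cycles_per_jiffy = freq / hz;    /* 10000 cycles = 10 ms */

    /* The counter runs 0..TRR inclusive, i.e. TRR + 1 cycles per period,
     * so writing cycles_per_jiffy would give 10001 cycles (10.001 ms). */
    printf("program TRR = %lu\n", cycles_per_jiffy - 1);   /* 9999 */
    return 0;
}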

View file

@ -13,10 +13,9 @@
#include <asm/unistd.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
.level LEVEL
__HEAD
.text
.import hpux_call_table
.import hpux_syscall_exit,code

View file

@ -28,10 +28,9 @@
#include <asm/assembly.h>
#include <asm/signal.h>
#include <linux/linkage.h>
#include <linux/init.h>
.level LEVEL
__HEAD
.text
/* These should probably go in a header file somewhere.
* They are duplicated in kernel/wrappers.S

View file

@ -38,7 +38,6 @@
#include <asm/thread_info.h>
#include <linux/linkage.h>
#include <linux/init.h>
#ifdef CONFIG_64BIT
.level 2.0w
@ -622,7 +621,7 @@
* the static part of the kernel address space.
*/
__HEAD
.text
.align PAGE_SIZE

View file

@ -33,6 +33,7 @@ ENTRY(boot_args)
END(boot_args)
__HEAD
.align 4
.import init_thread_union,data
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */

View file

@ -47,7 +47,6 @@
#include <asm/pdc.h>
#include <linux/linkage.h>
#include <linux/init.h>
/*
* stack for os_hpmc, the HPMC handler.
@ -77,7 +76,7 @@ ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
END(hpmc_pim_data)
__HEAD
.text
.import intr_save, code
ENTRY(os_hpmc)

View file

@ -37,9 +37,8 @@
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>
#include <linux/init.h>
__HEAD
.text
.align 128
ENTRY(flush_tlb_all_local)

View file

@ -152,3 +152,6 @@ EXPORT_SYMBOL($$dyncall);
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(pfnnid_map);
#endif
/* from pacache.S -- needed for copy_page */
EXPORT_SYMBOL(copy_user_page_asm);

View file

@ -43,7 +43,7 @@
; The coprocessor only needs to be enabled when
; starting/stopping the coprocessor with the pmenb/pmdis.
;
__HEAD
.text
ENTRY(perf_intrigue_enable_perf_counters)
.proc

View file

@ -12,7 +12,6 @@
#include <asm/assembly.h>
#include <linux/linkage.h>
#include <linux/init.h>
.section .bss
.export real_stack
@ -40,7 +39,7 @@ save_cr_end:
/************************ 32-bit real-mode calls ***********************/
/* This can be called in both narrow and wide kernels */
__HEAD
.text
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
@ -114,7 +113,7 @@ ENDPROC(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
__HEAD
.text
save_control_regs:
load32 PA(save_cr_space), %r28
PUSH_CR(%cr24, %r28)
@ -146,7 +145,7 @@ restore_control_regs:
/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
* more general-purpose use by the several places which need RFIs
*/
__HEAD
.text
.align 128
rfi_virt2real:
/* switch to real mode... */
@ -181,7 +180,7 @@ rfi_v2r_1:
bv 0(%r2)
nop
__HEAD
.text
.align 128
rfi_real2virt:
rsm PSW_SM_I,%r0
@ -219,7 +218,7 @@ rfi_r2v_1:
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
__HEAD
.text
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
@ -277,7 +276,7 @@ ENDPROC(real64_call_asm)
#endif
__HEAD
.text
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.

View file

@ -289,7 +289,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
&sc->sc_iaoq[0], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)(regs->gr[32] >> 32);
compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
@ -299,7 +299,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
&sc->sc_iaoq[1], compat_reg);
/* Store upper half */
compat_reg = (compat_uint_t)((regs->gr[32]+4) >> 32);
compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);

View file

@ -17,7 +17,6 @@
#include <asm/processor.h>
#include <linux/linkage.h>
#include <linux/init.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
@ -27,7 +26,7 @@
.level LEVEL
__HEAD
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
@ -637,7 +636,7 @@ END(sys_call_table64)
All light-weight-syscall atomic operations
will use this set of locks
*/
.section .data, "aw"
.section .data
.align PAGE_SIZE
ENTRY(lws_lock_start)
/* lws locks */

View file

@ -50,6 +50,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text ALIGN(16) : {
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT

View file

@ -23,7 +23,6 @@
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
@ -56,7 +55,7 @@
.level LEVEL
__HEAD
.text
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */

View file

@ -33,12 +33,11 @@
*/
.text
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
__HEAD
/*
* get_sr gets the appropriate space value into

View file

@ -547,6 +547,7 @@ void __init mem_init(void)
}
unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);
void show_mem(void)
{

View file

@ -116,8 +116,6 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
struct page *page = vcpu->arch.shadow_pages[index];
kunmap(vcpu->arch.shadow_pages[index]);
if (get_tlb_v(stlbe)) {
if (kvmppc_44x_tlbe_is_writable(stlbe))
kvm_release_page_dirty(page);
@ -144,18 +142,19 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
stlbe = &vcpu->arch.shadow_tlb[victim];
/* Get reference to new page. */
down_write(&current->mm->mmap_sem);
down_read(&current->mm->mmap_sem);
new_page = gfn_to_page(vcpu->kvm, gfn);
if (is_error_page(new_page)) {
printk(KERN_ERR "Couldn't get guest page!\n");
printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
kvm_release_page_clean(new_page);
up_read(&current->mm->mmap_sem);
return;
}
hpaddr = page_to_phys(new_page);
/* Drop reference to old page. */
kvmppc_44x_shadow_release(vcpu, victim);
up_write(&current->mm->mmap_sem);
up_read(&current->mm->mmap_sem);
vcpu->arch.shadow_pages[victim] = new_page;

View file

@ -227,39 +227,6 @@ void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
}
}
static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
enum emulation_result er;
int r;
er = kvmppc_emulate_instruction(run, vcpu);
switch (er) {
case EMULATE_DONE:
/* Future optimization: only reload non-volatiles if they were
* actually modified. */
r = RESUME_GUEST_NV;
break;
case EMULATE_DO_MMIO:
run->exit_reason = KVM_EXIT_MMIO;
/* We must reload nonvolatiles because "update" load/store
* instructions modify register state. */
/* Future optimization: only reload non-volatiles if they were
* actually modified. */
r = RESUME_HOST_NV;
break;
case EMULATE_FAIL:
/* XXX Deliver Program interrupt to guest. */
printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
vcpu->arch.last_inst);
r = RESUME_HOST;
break;
default:
BUG();
}
return r;
}
/**
* kvmppc_handle_exit
*

View file

@ -246,6 +246,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case 31:
switch (get_xop(inst)) {
case 23: /* lwzx */
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case 83: /* mfmsr */
rt = get_rt(inst);
vcpu->arch.gpr[rt] = vcpu->arch.msr;
@ -267,6 +272,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
break;
case 151: /* stwx */
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
vcpu->arch.gpr[rs],
4, 1);
break;
case 163: /* wrteei */
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
| (inst & MSR_EE);

View file

@ -304,6 +304,7 @@ config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
select SPARSEMEM_VMEMMAP
select SPARSEMEM_STATIC if !64BIT
config ARCH_SPARSEMEM_DEFAULT
def_bool y

View file

@ -711,7 +711,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
memset(sf, 0, sizeof(struct stack_frame));
sf->gprs[9] = (unsigned long) sf;
cpu_lowcore->save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
asm volatile(
" stam 0,15,0(%0)"
: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");

View file

@ -20,7 +20,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
vcpu->stat.diagnose_44++;
vcpu_put(vcpu);
schedule();
yield();
vcpu_load(vcpu);
return 0;
}

View file

@ -339,6 +339,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
if (kvm_cpu_has_interrupt(vcpu))
return 0;
__set_cpu_idle(vcpu);
spin_lock_bh(&vcpu->arch.local_int.lock);
vcpu->arch.local_int.timer_due = 0;
spin_unlock_bh(&vcpu->arch.local_int.lock);
if (psw_interrupts_disabled(vcpu)) {
VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
__unset_cpu_idle(vcpu);
@ -366,8 +371,6 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
no_timer:
spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
spin_lock_bh(&vcpu->arch.local_int.lock);
__set_cpu_idle(vcpu);
vcpu->arch.local_int.timer_due = 0;
add_wait_queue(&vcpu->arch.local_int.wq, &wait);
while (list_empty(&vcpu->arch.local_int.list) &&
list_empty(&vcpu->arch.local_int.float_int->list) &&

View file

@ -423,6 +423,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return -EINVAL; /* not implemented yet */
}
extern void s390_handle_mcck(void);
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
@ -430,13 +432,21 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
if (need_resched())
schedule();
if (test_thread_flag(TIF_MCCK_PENDING))
s390_handle_mcck();
kvm_s390_deliver_pending_interrupts(vcpu);
vcpu->arch.sie_block->icptcode = 0;
local_irq_disable();
kvm_guest_enter();
local_irq_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
local_irq_disable();
@ -475,7 +485,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
might_sleep();
do {
kvm_s390_deliver_pending_interrupts(vcpu);
__vcpu_run(vcpu);
rc = kvm_handle_sie_intercept(vcpu);
} while (!signal_pending(current) && !rc);

View file

@ -254,36 +254,46 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
int s390_enable_sie(void)
{
struct task_struct *tsk = current;
struct mm_struct *mm;
int rc;
struct mm_struct *mm, *old_mm;
task_lock(tsk);
rc = 0;
/* Do we have pgstes? if yes, we are done */
if (tsk->mm->context.pgstes)
goto unlock;
return 0;
rc = -EINVAL;
/* lets check if we are allowed to replace the mm */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
goto unlock;
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
task_unlock(tsk);
return -EINVAL;
}
task_unlock(tsk);
tsk->mm->context.pgstes = 1; /* dirty little tricks .. */
/* we copy the mm with pgstes enabled */
tsk->mm->context.pgstes = 1;
mm = dup_mm(tsk);
tsk->mm->context.pgstes = 0;
rc = -ENOMEM;
if (!mm)
goto unlock;
mmput(tsk->mm);
return -ENOMEM;
/* Now lets check again if somebody attached ptrace etc */
task_lock(tsk);
if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
mmput(mm);
task_unlock(tsk);
return -EINVAL;
}
/* ok, we are alone. No ptrace, no threads, etc. */
old_mm = tsk->mm;
tsk->mm = tsk->active_mm = mm;
preempt_disable();
update_mm(mm, tsk);
cpu_set(smp_processor_id(), mm->cpu_vm_mask);
preempt_enable();
rc = 0;
unlock:
task_unlock(tsk);
return rc;
mmput(old_mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

View file

@ -236,7 +236,7 @@ static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
if (seg->start + seg->size >= VMEM_MAX_PHYS ||
if (seg->start + seg->size > VMEM_MAX_PHYS ||
seg->start + seg->size < seg->start)
return -ERANGE;

View file

@ -121,8 +121,10 @@ static int stop_ptraced_child(int pid, int exitcode, int mustexit)
{
int status, n, ret = 0;
if (ptrace(PTRACE_CONT, pid, 0, 0) < 0)
fatal_perror("stop_ptraced_child : ptrace failed");
if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) {
perror("stop_ptraced_child : ptrace failed");
return -1;
}
CATCH_EINTR(n = waitpid(pid, &status, 0));
if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) {
int exit_with = WEXITSTATUS(status);
@ -212,7 +214,7 @@ static void __init check_sysemu(void)
if (n < 0)
fatal_perror("check_sysemu : wait failed");
if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP))
fatal("check_sysemu : expected SIGTRAP, got status = %d",
fatal("check_sysemu : expected SIGTRAP, got status = %d\n",
status);
if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
@ -254,9 +256,11 @@ static void __init check_sysemu(void)
if (WIFSTOPPED(status) &&
(WSTOPSIG(status) == (SIGTRAP|0x80))) {
if (!count)
fatal("check_ptrace : SYSEMU_SINGLESTEP "
"doesn't singlestep");
if (!count) {
non_fatal("check_ptrace : SYSEMU_SINGLESTEP "
"doesn't singlestep");
goto fail;
}
n = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_RET_OFFSET,
os_getpid());
if (n < 0)
@ -266,9 +270,12 @@ static void __init check_sysemu(void)
}
else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP))
count++;
else
fatal("check_ptrace : expected SIGTRAP or "
"(SIGTRAP | 0x80), got status = %d", status);
else {
non_fatal("check_ptrace : expected SIGTRAP or "
"(SIGTRAP | 0x80), got status = %d\n",
status);
goto fail;
}
}
if (stop_ptraced_child(pid, 0, 0) < 0)
goto fail_stopped;

View file

@ -6,7 +6,7 @@
#include <errno.h>
#include <sys/ptrace.h>
#include <asm/user.h>
#include <sys/user.h>
#include "kern_constants.h"
#include "longjmp.h"
#include "user.h"
@ -76,7 +76,7 @@ int put_fp_registers(int pid, unsigned long *regs)
void arch_init_registers(int pid)
{
struct user_fxsr_struct fpx_regs;
struct user_fpxregs_struct fpx_regs;
int err;
err = ptrace(PTRACE_GETFPXREGS, pid, 0, &fpx_regs);

View file

@ -115,8 +115,6 @@ static void enable_a20_fast(void)
int enable_a20(void)
{
int loops = A20_ENABLE_LOOPS;
#if defined(CONFIG_X86_ELAN)
/* Elan croaks if we try to touch the KBC */
enable_a20_fast();
@ -128,6 +126,7 @@ int enable_a20(void)
enable_a20_kbc();
return 0;
#else
int loops = A20_ENABLE_LOOPS;
while (loops--) {
/* First, check to see if A20 is already enabled
(legacy free, etc.) */

View file

@ -248,6 +248,7 @@ ENTRY(resume_userspace)
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?

View file

@ -189,7 +189,7 @@ default_entry:
* this stage.
*/
#define KPMDS ((0x100000000-__PAGE_OFFSET) >> 30) /* Number of kernel PMDs */
#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
xorl %ebx,%ebx /* %ebx is kept at zero */
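A quick check of the new KPMDS expression, assuming the common 32-bit __PAGE_OFFSET of 0xC0000000 (an assumption for illustration). Both forms give the same count for that value; the new one stays within 32-bit arithmetic.

#include <stdio.h>

int main(void)
{
    unsigned int page_offset = 0xC0000000u;   /* assumed CONFIG_PAGE_OFFSET */

    /* (-page_offset) in 32-bit unsigned arithmetic is 2^32 - page_offset,
     * so this counts how many 1 GiB PMDs cover the kernel mapping. */
    unsigned int kpmds = ((0u - page_offset) >> 30) & 3;

    printf("KPMDS = %u\n", kpmds);            /* prints 1 for 0xC0000000 */
    return 0;
}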

View file

@ -2130,14 +2130,10 @@ static inline void __init check_timer(void)
{
int apic1, pin1, apic2, pin2;
int vector;
unsigned int ver;
unsigned long flags;
local_irq_save(flags);
ver = apic_read(APIC_LVR);
ver = GET_APIC_VERSION(ver);
/*
* get/set the timer IRQ vector:
*/
@ -2150,15 +2146,11 @@ static inline void __init check_timer(void)
* mode for the 8259A whenever interrupts are routed
* through I/O APICs. Also IRQ0 has to be enabled in
* the 8259A which implies the virtual wire has to be
* disabled in the local APIC. Finally timer interrupts
* need to be acknowledged manually in the 8259A for
* timer_interrupt() and for the i82489DX when using
* the NMI watchdog.
* disabled in the local APIC.
*/
apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
timer_ack = !cpu_has_tsc;
timer_ack |= (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
timer_ack = 1;
if (timer_over_8254 > 0)
enable_8259A_irq(0);

View file

@ -155,6 +155,7 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
wrmsr(msr, value, dummy);
return 0;
}
EXPORT_SYMBOL_GPL(geode_mfgpt_toggle_event);
int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
{
@ -222,6 +223,7 @@ int geode_mfgpt_alloc_timer(int timer, int domain)
/* No timers available - too bad */
return -1;
}
EXPORT_SYMBOL_GPL(geode_mfgpt_alloc_timer);
#ifdef CONFIG_GEODE_MFGPT_TIMER

View file

@ -26,7 +26,6 @@
#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/timer.h>
#include "mach_traps.h"
@ -82,7 +81,7 @@ int __init check_nmi_watchdog(void)
prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
if (!prev_nmi_count)
goto error;
return -1;
printk(KERN_INFO "Testing NMI watchdog ... ");
@ -119,7 +118,7 @@ int __init check_nmi_watchdog(void)
if (!atomic_read(&nmi_active)) {
kfree(prev_nmi_count);
atomic_set(&nmi_active, -1);
goto error;
return -1;
}
printk("OK.\n");
@ -130,10 +129,6 @@ int __init check_nmi_watchdog(void)
kfree(prev_nmi_count);
return 0;
error:
timer_ack = !cpu_has_tsc;
return -1;
}
static int __init setup_nmi_watchdog(char *str)

View file

@ -379,6 +379,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
struct page *page;
unsigned long dma_mask = 0;
dma_addr_t bus;
int noretry = 0;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@ -398,20 +399,25 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
if (dev->dma_mask == NULL)
return NULL;
/* Don't invoke OOM killer */
gfp |= __GFP_NORETRY;
/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
if (gfp & __GFP_DMA)
noretry = 1;
#ifdef CONFIG_X86_64
/* Why <=? Even when the mask is smaller than 4GB it is often
larger than 16MB and in this case we have a chance of
finding fitting memory in the next higher zone first. If
not retry with true GFP_DMA. -AK */
if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
gfp |= GFP_DMA32;
if (dma_mask < DMA_32BIT_MASK)
noretry = 1;
}
#endif
again:
page = dma_alloc_pages(dev, gfp, get_order(size));
page = dma_alloc_pages(dev,
noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
if (page == NULL)
return NULL;

View file

@ -4,6 +4,8 @@
#include <linux/acpi.h>
#include <linux/bcd.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
#include <linux/pnp.h>
#include <asm/time.h>
#include <asm/vsyscall.h>
@ -197,3 +199,35 @@ unsigned long long native_read_tsc(void)
}
EXPORT_SYMBOL(native_read_tsc);
static struct resource rtc_resources[] = {
[0] = {
.start = RTC_PORT(0),
.end = RTC_PORT(1),
.flags = IORESOURCE_IO,
},
[1] = {
.start = RTC_IRQ,
.end = RTC_IRQ,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device rtc_device = {
.name = "rtc_cmos",
.id = -1,
.resource = rtc_resources,
.num_resources = ARRAY_SIZE(rtc_resources),
};
static __init int add_rtc_cmos(void)
{
#ifdef CONFIG_PNP
if (!pnp_platform_devices)
platform_device_register(&rtc_device);
#else
platform_device_register(&rtc_device);
#endif /* CONFIG_PNP */
return 0;
}
device_initcall(add_rtc_cmos);

View file

@ -544,6 +544,7 @@ vm86_trap:
#define DO_ERROR(trapnr, signr, str, name) \
void do_##name(struct pt_regs *regs, long error_code) \
{ \
trace_hardirqs_fixup(); \
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
== NOTIFY_STOP) \
return; \

View file

@ -200,7 +200,6 @@ int __pit_timer_fn(struct kvm_kpit_state *ps)
atomic_inc(&pt->pending);
smp_mb__after_atomic_inc();
/* FIXME: handle case where the guest is in guest mode */
if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
wake_up_interruptible(&vcpu0->wq);
@ -237,6 +236,19 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
struct hrtimer *timer;
if (vcpu->vcpu_id != 0 || !pit)
return;
timer = &pit->pit_state.pit_timer.timer;
if (hrtimer_cancel(timer))
hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
}
static void destroy_pit_timer(struct kvm_kpit_timer *pt)
{
pr_debug("pit: execute del timer!\n");

View file

@ -94,3 +94,9 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
__kvm_migrate_apic_timer(vcpu);
__kvm_migrate_pit_timer(vcpu);
}

View file

@ -84,6 +84,8 @@ void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
int pit_has_pending_timer(struct kvm_vcpu *vcpu);
int apic_has_pending_timer(struct kvm_vcpu *vcpu);

View file

@ -658,7 +658,7 @@ static int is_empty_shadow_page(u64 *spt)
u64 *end;
for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
if (*pos != shadow_trap_nonpresent_pte) {
if (is_shadow_present_pte(*pos)) {
printk(KERN_ERR "%s: %p %llx\n", __func__,
pos, *pos);
return 0;
@ -1858,6 +1858,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
struct kvm_mmu_page, link);
kvm_mmu_zap_page(vcpu->kvm, sp);
cond_resched();
}
free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

View file

@ -418,7 +418,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
/* mmio */
if (is_error_pfn(pfn)) {
pgprintk("gfn %x is mmio\n", walker.gfn);
pgprintk("gfn %lx is mmio\n", walker.gfn);
kvm_release_pfn_clean(pfn);
return 1;
}

View file

@ -688,7 +688,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
delta = vcpu->arch.host_tsc - tsc_this;
svm->vmcb->control.tsc_offset += delta;
vcpu->cpu = cpu;
kvm_migrate_apic_timer(vcpu);
kvm_migrate_timers(vcpu);
}
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)

View file

@ -608,7 +608,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vcpu->cpu != cpu) {
vcpu_clear(vmx);
kvm_migrate_apic_timer(vcpu);
kvm_migrate_timers(vcpu);
vpid_sync_vcpu_all(vmx);
}
@ -1036,6 +1036,7 @@ static void hardware_enable(void *garbage)
static void hardware_disable(void *garbage)
{
asm volatile (ASM_VMX_VMXOFF : : : "cc");
write_cr4(read_cr4() & ~X86_CR4_VMXE);
}
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,

View file

@ -2758,7 +2758,7 @@ again:
if (vcpu->requests) {
if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
__kvm_migrate_apic_timer(vcpu);
__kvm_migrate_timers(vcpu);
if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
&vcpu->requests)) {
kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;

View file

@ -1727,7 +1727,8 @@ twobyte_insn:
if (rc)
goto done;
kvm_emulate_hypercall(ctxt->vcpu);
/* Let the processor re-execute the fixed hypercall */
c->eip = ctxt->vcpu->arch.rip;
/* Disable writeback. */
c->dst.type = OP_NONE;
break;

View file

@ -497,6 +497,11 @@ static int vmalloc_fault(unsigned long address)
unsigned long pgd_paddr;
pmd_t *pmd_k;
pte_t *pte_k;
/* Make sure we are in vmalloc area */
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.

View file

@ -97,36 +97,9 @@ static __init inline int srat_disabled(void)
return numa_off || acpi_numa < 0;
}
/*
* A lot of BIOS fill in 10 (= no distance) everywhere. This messes
* up the NUMA heuristics which wants the local node to have a smaller
* distance than the others.
* Do some quick checks here and only use the SLIT if it passes.
*/
static __init int slit_valid(struct acpi_table_slit *slit)
{
int i, j;
int d = slit->locality_count;
for (i = 0; i < d; i++) {
for (j = 0; j < d; j++) {
u8 val = slit->entry[d*i + j];
if (i == j) {
if (val != LOCAL_DISTANCE)
return 0;
} else if (val <= LOCAL_DISTANCE)
return 0;
}
}
return 1;
}
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
if (!slit_valid(slit)) {
printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
return;
}
acpi_slit = slit;
}

View file

@ -328,18 +328,18 @@ static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
#endif
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL385 G2",
.ident = "HP ProLiant DL360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
},
},
{
.callback = set_bf_sort,
.ident = "HP ProLiant DL585 G2",
.ident = "HP ProLiant DL380",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
},
},
{}

View file

@ -12,6 +12,7 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@ -150,11 +151,7 @@ static void do_stolen_accounting(void)
if (stolen < 0)
stolen = 0;
ticks = 0;
while (stolen >= NS_PER_TICK) {
ticks++;
stolen -= NS_PER_TICK;
}
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
__get_cpu_var(residual_stolen) = stolen;
account_steal_time(NULL, ticks);
@ -166,11 +163,7 @@ static void do_stolen_accounting(void)
if (blocked < 0)
blocked = 0;
ticks = 0;
while (blocked >= NS_PER_TICK) {
ticks++;
blocked -= NS_PER_TICK;
}
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
__get_cpu_var(residual_blocked) = blocked;
account_steal_time(idle_task(smp_processor_id()), ticks);
}
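The two open-coded subtraction loops are replaced by iter_div_u64_rem() from linux/math64.h. A userspace model of that helper is shown below only to make the equivalence obvious; it is a sketch, not the kernel implementation.

#include <stdint.h>

static uint64_t iter_div_u64_rem_model(uint64_t dividend, uint32_t divisor,
                                       uint64_t *remainder)
{
    uint64_t ticks = 0;

    /* Same shape as the removed loops: repeatedly subtract one tick's
     * worth of nanoseconds and count how many whole ticks fit. */
    while (dividend >= divisor) {
        dividend -= divisor;
        ticks++;
    }
    *remainder = dividend;
    return ticks;
}

/* Usage mirroring the new code:
 *     ticks = iter_div_u64_rem_model(stolen, NS_PER_TICK, &stolen);
 */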

View file

@ -79,16 +79,17 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
int n;
va_list args;
unsigned long flags;
char *buf;
preempt_disable();
local_irq_save(flags);
buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
va_start(args, fmt);
n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
va_end(args);
trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
preempt_enable();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
@ -158,10 +159,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
/*
* A word about the locking here - we disable interrupts to reserve
* some space in the relay per-cpu buffer, to prevent an irq
* from coming in and stepping on our toes. Once reserved, it's
* enough to get preemption disabled to prevent read of this data
* before we are through filling it. get_cpu()/put_cpu() does this
* for us
* from coming in and stepping on our toes.
*/
local_irq_save(flags);

View file

@ -301,16 +301,20 @@ static int bay_add(acpi_handle handle, int id)
*/
pdev->dev.uevent_suppress = 0;
if (acpi_bay_add_fs(new_bay)) {
platform_device_unregister(new_bay->pdev);
goto bay_add_err;
}
/* register for events on this device */
status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
bay_notify, new_bay);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Error installing bay notify handler\n");
printk(KERN_INFO PREFIX "Error installing bay notify handler\n");
platform_device_unregister(new_bay->pdev);
goto bay_add_err;
}
if (acpi_bay_add_fs(new_bay)) {
acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
bay_notify);
platform_device_unregister(new_bay->pdev);
goto bay_add_err;
}
/* if we are on a dock station, we should register for dock

View file

@ -450,10 +450,6 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
if (!arg) {
return_ACPI_STATUS(AE_AML_NO_OPERAND);
}
/* Creating new namespace node(s), should not already exist */
flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
@ -467,6 +463,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
/*
* Walk the list of entries in the field_list
* Note: field_list can be of zero length. In this case, Arg will be NULL.
*/
while (arg) {
/*

View file

@ -834,7 +834,7 @@ static int dock_add(acpi_handle handle)
goto dock_add_err;
}
printk(KERN_INFO PREFIX "%s \n", ACPI_DOCK_DRIVER_DESCRIPTION);
printk(KERN_INFO PREFIX "%s\n", ACPI_DOCK_DRIVER_DESCRIPTION);
return 0;

View file

@ -194,7 +194,7 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
while (time_before(jiffies, delay)) {
if (acpi_ec_check_status(ec, event))
return 0;
udelay(ACPI_EC_UDELAY);
msleep(1);
}
}
pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",

View file

@ -375,9 +375,15 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
goto cleanup;
}
/*
* Add the table to the namespace.
*
* Note: We load the table objects relative to the root of the namespace.
* This appears to go against the ACPI specification, but we do it for
* compatibility with other ACPI implementations.
*/
status =
acpi_ex_add_table(table_index, walk_state->scope_info->scope.node,
&ddb_handle);
acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
if (ACPI_FAILURE(status)) {
/* On error, table_ptr was deallocated above */

View file

@ -272,6 +272,12 @@ static u32 rtc_handler(void *context)
static inline void rtc_wake_setup(void)
{
acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
/*
* After the RTC handler is installed, the Fixed_RTC event should
* be disabled. Only when the RTC alarm is set will it be enabled.
*/
acpi_clear_event(ACPI_EVENT_RTC);
acpi_disable_event(ACPI_EVENT_RTC, 0);
}
static void rtc_wake_on(struct device *dev)

View file

@ -223,15 +223,17 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
break;
}
/* Set the system indicators to show the desired sleep state. */
/*
* Set the system indicators to show the desired sleep state.
* _SST is an optional method (return no error if not found)
*/
status = acpi_evaluate_object(NULL, METHOD_NAME__SST, &arg_list, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
ACPI_EXCEPTION((AE_INFO, status,
"While executing method _SST"));
}
return_ACPI_STATUS(status);
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)

View file

@ -140,19 +140,42 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
}
}
/*
* A lot of BIOS fill in 10 (= no distance) everywhere. This messes
* up the NUMA heuristics which wants the local node to have a smaller
* distance than the others.
* Do some quick checks here and only use the SLIT if it passes.
*/
static __init int slit_valid(struct acpi_table_slit *slit)
{
int i, j;
int d = slit->locality_count;
for (i = 0; i < d; i++) {
for (j = 0; j < d; j++) {
u8 val = slit->entry[d*i + j];
if (i == j) {
if (val != LOCAL_DISTANCE)
return 0;
} else if (val <= LOCAL_DISTANCE)
return 0;
}
}
return 1;
}
static int __init acpi_parse_slit(struct acpi_table_header *table)
{
struct acpi_table_slit *slit;
u32 localities;
if (!table)
return -EINVAL;
slit = (struct acpi_table_slit *)table;
/* downcast just for %llu vs %lu for i386/ia64 */
localities = (u32) slit->locality_count;
if (!slit_valid(slit)) {
printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
return -EINVAL;
}
acpi_numa_slit_init(slit);
return 0;

View file

@ -268,7 +268,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*/
if (ACPI_SUCCESS(status) &&
possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
if (walk_state->opcode == AML_UNLOAD_OP) {
/*
* acpi_ps_get_next_namestring has increased the AML pointer,
* so we need to restore the saved AML pointer for method call.
@ -691,7 +691,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
/* To support super_name arg of Unload */
if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
if (walk_state->opcode == AML_UNLOAD_OP) {
status =
acpi_ps_get_next_namepath(walk_state,
parser_state, arg,

View file

@ -86,7 +86,6 @@ static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
extern int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {

View file

@ -1669,6 +1669,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
return -EINVAL;
}
dev->cpu = pr->id;
for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
dev->states[i].name[0] = '\0';
dev->states[i].desc[0] = '\0';
@ -1738,7 +1739,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
int ret;
int ret = 0;
if (boot_option_idle_override)
return 0;
@ -1756,8 +1757,10 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
cpuidle_pause_and_lock();
cpuidle_disable_device(&pr->power.dev);
acpi_processor_get_power_info(pr);
acpi_processor_setup_cpuidle(pr);
ret = cpuidle_enable_device(&pr->power.dev);
if (pr->flags.power) {
acpi_processor_setup_cpuidle(pr);
ret = cpuidle_enable_device(&pr->power.dev);
}
cpuidle_resume_and_unlock();
return ret;
@ -1813,7 +1816,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
acpi_processor_setup_cpuidle(pr);
pr->power.dev.cpu = pr->id;
if (cpuidle_register_device(&pr->power.dev))
return -EIO;
#endif
@ -1850,8 +1852,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
return 0;
#ifdef CONFIG_CPU_IDLE
if (pr->flags.power)
cpuidle_unregister_device(&pr->power.dev);
cpuidle_unregister_device(&pr->power.dev);
#endif
pr->flags.power_setup_done = 0;

View file

@ -495,6 +495,12 @@ static int __init acpi_sleep_proc_init(void)
acpi_root_dir, &acpi_system_alarm_fops);
acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
/*
* Disable the RTC event after installing RTC handler.
* Only when RTC alarm is set will it be enabled.
*/
acpi_clear_event(ACPI_EVENT_RTC);
acpi_disable_event(ACPI_EVENT_RTC, 0);
#endif /* HAVE_ACPI_LEGACY_ALARM */
/* 'wakeup device' [R/W] */

View file

@ -77,7 +77,6 @@ static ssize_t acpi_table_show(struct kobject *kobj,
container_of(bin_attr, struct acpi_table_attr, attr);
struct acpi_table_header *table_header = NULL;
acpi_status status;
ssize_t ret_count = count;
status =
acpi_get_table(table_attr->name, table_attr->instance,
@ -85,18 +84,8 @@ static ssize_t acpi_table_show(struct kobject *kobj,
if (ACPI_FAILURE(status))
return -ENODEV;
if (offset >= table_header->length) {
ret_count = 0;
goto end;
}
if (offset + ret_count > table_header->length)
ret_count = table_header->length - offset;
memcpy(buf, ((char *)table_header) + offset, ret_count);
end:
return ret_count;
return memory_read_from_buffer(buf, count, &offset,
table_header, table_header->length);
}
static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
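The open-coded offset and length clamping is replaced with memory_read_from_buffer(). A simplified userspace model of what that helper does is sketched below for reference; types are simplified (the kernel version takes a loff_t *ppos and returns -EINVAL for a negative offset).

#include <string.h>
#include <sys/types.h>

static ssize_t memory_read_from_buffer_model(void *to, size_t count,
                                             long long *ppos,
                                             const void *from, size_t available)
{
    long long pos = *ppos;

    if (pos < 0)
        return -1;                              /* kernel returns -EINVAL */
    if ((size_t)pos >= available)
        return 0;                               /* read past the end: EOF */
    if (count > available - (size_t)pos)
        count = available - (size_t)pos;        /* clamp to table length */

    memcpy(to, (const char *)from + pos, count);
    *ppos = pos + count;
    return count;
}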

View file

@ -123,24 +123,13 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc,
}
}
/* The table must be either an SSDT or a PSDT or an OEMx */
if (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT)&&
!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)&&
strncmp(table_desc->pointer->signature, "OEM", 3)) {
/* Check for a printable name */
if (acpi_ut_valid_acpi_name(
*(u32 *) table_desc->pointer->signature)) {
ACPI_ERROR((AE_INFO, "Table has invalid signature "
"[%4.4s], must be SSDT or PSDT",
table_desc->pointer->signature));
} else {
ACPI_ERROR((AE_INFO, "Table has invalid signature "
"(0x%8.8X), must be SSDT or PSDT",
*(u32 *) table_desc->pointer->signature));
}
return_ACPI_STATUS(AE_BAD_SIGNATURE);
}
/*
* Originally, we checked the table signature for "SSDT" or "PSDT" here.
* Next, we added support for OEMx tables, signature "OEM".
* Valid tables were encountered with a null signature, so we've just
* given up on validating the signature, since it seems to be a waste
* of code. The original code was removed (05/2008).
*/
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

View file

@ -540,7 +540,7 @@ static acpi_status acpi_tb_load_namespace(void)
acpi_tb_print_table_header(0, table);
if (no_auto_ssdt == 0) {
printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"");
printk(KERN_WARNING "ACPI: DSDT override uses original SSDTs unless \"acpi_no_auto_ssdt\"\n");
}
}

View file

@ -364,10 +364,17 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
if (flag & ACPI_TRIPS_CRITICAL) {
status = acpi_evaluate_integer(tz->device->handle,
"_CRT", NULL, &tz->trips.critical.temperature);
if (ACPI_FAILURE(status)) {
/*
* Treat freezing temperatures as invalid as well; some
* BIOSes return really low values and cause reboots at startup.
* Below zero (Celcius) values clearly aren't right for sure..
* ... so lets discard those as invalid.
*/
if (ACPI_FAILURE(status) ||
tz->trips.critical.temperature <= 2732) {
tz->trips.critical.flags.valid = 0;
ACPI_EXCEPTION((AE_INFO, status,
"No critical threshold"));
"No or invalid critical threshold"));
return -ENODEV;
} else {
tz->trips.critical.flags.valid = 1;
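For context on the magic number: ACPI reports temperatures in tenths of a kelvin, so 2732 corresponds to 0 degrees Celsius. A small illustrative conversion follows; it is a sketch for clarity, not code taken from the driver.

#include <stdio.h>

/* ACPI thermal values are tenths of a kelvin; 2732 == 273.2 K == 0 degC. */
static long acpi_deci_kelvin_to_celsius(long t)
{
    return (t - 2732) / 10;
}

int main(void)
{
    printf("%ld\n", acpi_deci_kelvin_to_celsius(3732));   /* 100 degC */
    printf("%ld\n", acpi_deci_kelvin_to_celsius(2732));   /* 0 degC */
    return 0;
}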

View file

@ -1048,6 +1048,7 @@ acpi_ut_exception(char *module_name,
va_start(args, format);
acpi_os_vprintf(format, args);
acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
va_end(args);
}
EXPORT_SYMBOL(acpi_ut_exception);
@ -1063,7 +1064,6 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...)
acpi_os_vprintf(format, args);
acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
va_end(args);
va_end(args);
}
void ACPI_INTERNAL_VAR_XFACE

View file

@ -89,6 +89,7 @@ enum {
board_ahci_sb600 = 3,
board_ahci_mv = 4,
board_ahci_sb700 = 5,
board_ahci_mcp65 = 6,
/* global controller registers */
HOST_CAP = 0x00, /* host capabilities */
@ -190,6 +191,7 @@ enum {
AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
/* ap->flags bits */
@ -253,6 +255,8 @@ static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
@ -329,6 +333,12 @@ static struct ata_port_operations ahci_p5wdh_ops = {
.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_sb600_ops = {
.inherits = &ahci_ops,
.softreset = ahci_sb600_softreset,
.pmp_softreset = ahci_sb600_softreset,
};
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
static const struct ata_port_info ahci_port_info[] = {
@ -359,11 +369,11 @@ static const struct ata_port_info ahci_port_info[] = {
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
AHCI_HFLAG_SECT255),
.flags = AHCI_FLAG_COMMON,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
.port_ops = &ahci_sb600_ops,
},
/* board_ahci_mv */
{
@ -377,8 +387,15 @@ static const struct ata_port_info ahci_port_info[] = {
},
/* board_ahci_sb700 */
{
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
AHCI_HFLAG_NO_PMP),
AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
.flags = AHCI_FLAG_COMMON,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_sb600_ops,
},
/* board_ahci_mcp65 */
{
AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
.flags = AHCI_FLAG_COMMON,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
@ -438,14 +455,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
/* NVIDIA */
{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
@ -624,6 +641,12 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
cap &= ~HOST_CAP_NCQ;
}
if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can do NCQ, turning on CAP_NCQ\n");
cap |= HOST_CAP_NCQ;
}
if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
dev_printk(KERN_INFO, &pdev->dev,
"controller can't do PMP, turning off CAP_PMP\n");
@ -1262,19 +1285,11 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
return 0;
}
static int ahci_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
return ata_check_ready(status);
}
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
int pmp, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
{
struct ata_port *ap = link->ap;
int pmp = sata_srst_pmp(link);
const char *reason = NULL;
unsigned long now, msecs;
struct ata_taskfile tf;
@ -1312,7 +1327,7 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
/* wait for link to become ready */
rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
rc = ata_wait_after_reset(link, deadline, check_ready);
/* link occupied, -ENODEV too is an error */
if (rc) {
reason = "device not ready";
@ -1328,6 +1343,72 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
return rc;
}
static int ahci_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
return ata_check_ready(status);
}
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
int pmp = sata_srst_pmp(link);
DPRINTK("ENTER\n");
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
static int ahci_sb600_check_ready(struct ata_link *link)
{
void __iomem *port_mmio = ahci_port_base(link->ap);
u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
/*
* There is no need to check TFDATA if BAD PMP is found due to HW bug,
* which can save timeout delay.
*/
if (irq_status & PORT_IRQ_BAD_PMP)
return -EIO;
return ata_check_ready(status);
}
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
void __iomem *port_mmio = ahci_port_base(ap);
int pmp = sata_srst_pmp(link);
int rc;
u32 irq_sts;
DPRINTK("ENTER\n");
rc = ahci_do_softreset(link, class, pmp, deadline,
ahci_sb600_check_ready);
/*
* Soft reset fails on some ATI chips with IPMS set when PMP
* is enabled but SATA HDD/ODD is connected to SATA port,
* do soft reset again to port 0.
*/
if (rc == -EIO) {
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
ata_link_printk(link, KERN_WARNING,
"failed due to HW bug, retry pmp=0\n");
rc = ahci_do_softreset(link, class, 0, deadline,
ahci_check_ready);
}
}
return rc;
}
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@ -2118,7 +2199,8 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_port_info pi = ahci_port_info[ent->driver_data];
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
@ -2167,6 +2249,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
hpriv->flags |= (unsigned long)pi.private_data;
/* MCP65 revision A1 and A2 can't do MSI */
if (board_id == board_ahci_mcp65 &&
(pdev->revision == 0xa1 || pdev->revision == 0xa2))
hpriv->flags |= AHCI_HFLAG_NO_MSI;
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);

View file

@ -18,7 +18,6 @@
#include "midway.h"
#define KERNEL_OFFSET 0xC0000000 /* kernel 0x0 is at phys 0xC0000000 */
#define DEV_LABEL "eni"
#define UBR_BUFFER (128*1024) /* UBR buffer size */

View file

@ -762,6 +762,7 @@ static void device_remove_class_symlinks(struct device *dev)
/**
* dev_set_name - set a device name
* @dev: device
* @fmt: format string for the device's name
*/
int dev_set_name(struct device *dev, const char *fmt, ...)
{

View file

@ -53,15 +53,16 @@
#include <linux/scatterlist.h>
#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
#define DRIVER_NAME "HP CISS Driver (v 3.6.14)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3,6,14)
#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 3.6.14");
MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
" SA6i P600 P800 P400 P400i E200 E200i E500");
MODULE_VERSION("3.6.14");
" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
" Smart Array G2 Series SAS/SATA Controllers");
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");
#include "cciss_cmd.h"
@ -90,6 +91,11 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@ -123,6 +129,11 @@ static struct board_type products[] = {
{0x3215103C, "Smart Array E200i", &SA5_access, 120},
{0x3237103C, "Smart Array E500", &SA5_access, 512},
{0x323D103C, "Smart Array P700m", &SA5_access, 512},
{0x3241103C, "Smart Array P212", &SA5_access, 384},
{0x3243103C, "Smart Array P410", &SA5_access, 384},
{0x3245103C, "Smart Array P410i", &SA5_access, 384},
{0x3247103C, "Smart Array P411", &SA5_access, 384},
{0x3249103C, "Smart Array P812", &SA5_access, 384},
{0xFFFF103C, "Unknown Smart Array", &SA5_access, 120},
};

View file

@ -749,7 +749,7 @@ config NVRAM
if RTC_LIB=n
config RTC
tristate "Enhanced Real Time Clock Support"
tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
&& !ARM && !SUPERH && !S390 && !AVR32
---help---
@ -1036,9 +1036,9 @@ config HPET
non-periodic and/or periodic.
config HPET_RTC_IRQ
bool "HPET Control RTC IRQ" if !HPET_EMULATE_RTC
default n
depends on HPET
bool
default HPET_EMULATE_RTC
depends on RTC && HPET
help
If you say Y here, you will disable RTC_IRQ in drivers/char/rtc.c. It
is assumed the platform called hpet_alloc with the RTC IRQ values for

View file

@ -457,6 +457,10 @@ static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
.device_id = PCI_DEVICE_ID_ATI_RS300_200,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS350_133,
.chipset_name = "IGP9000/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS350_200,
.chipset_name = "IGP9100/M",

View file

@ -133,7 +133,7 @@ static struct miscdevice nvram_dev = {
int __init nvram_init(void)
{
printk(KERN_INFO "Macintosh non-volatile memory driver v%s\n",
printk(KERN_INFO "Generic non-volatile memory driver v%s\n",
NVRAM_VERSION);
return misc_register(&nvram_dev);
}

View file

@ -273,7 +273,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
if (mfc != INTEL_FWH_MANUFACTURER_CODE ||
(dvc != INTEL_FWH_DEVICE_CODE_8M &&
dvc != INTEL_FWH_DEVICE_CODE_4M)) {
printk(KERN_ERR PFX "FWH not detected\n");
printk(KERN_NOTICE PFX "FWH not detected\n");
return -ENODEV;
}

View file

@ -677,12 +677,7 @@ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
static void k_self(struct vc_data *vc, unsigned char value, char up_flag)
{
unsigned int uni;
if (kbd->kbdmode == VC_UNICODE)
uni = value;
else
uni = conv_8bit_to_uni(value);
k_unicode(vc, uni, up_flag);
k_unicode(vc, conv_8bit_to_uni(value), up_flag);
}
static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)

View file

@ -38,6 +38,8 @@ static void cpuidle_kick_cpus(void)
static void cpuidle_kick_cpus(void) {}
#endif
static int __cpuidle_register_device(struct cpuidle_device *dev);
/**
* cpuidle_idle_call - the main idle loop
*
@ -138,6 +140,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (!dev->state_count)
return -EINVAL;
if (dev->registered == 0) {
ret = __cpuidle_register_device(dev);
if (ret)
return ret;
}
if ((ret = cpuidle_add_state_sysfs(dev)))
return ret;
@@ -232,10 +240,13 @@ static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
/**
* cpuidle_register_device - registers a CPU's idle PM feature
* __cpuidle_register_device - internal register function called before register
* and enable routines
* @dev: the cpu
*
* cpuidle_lock mutex must be held before this is called
*/
int cpuidle_register_device(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
@@ -247,18 +258,34 @@ int cpuidle_register_device(struct cpuidle_device *dev)
init_completion(&dev->kobj_unregister);
mutex_lock(&cpuidle_lock);
poll_idle_init(dev);
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
mutex_unlock(&cpuidle_lock);
module_put(cpuidle_curr_driver->owner);
return ret;
}
dev->registered = 1;
return 0;
}
/**
* cpuidle_register_device - registers a CPU's idle PM feature
* @dev: the cpu
*/
int cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
mutex_lock(&cpuidle_lock);
if ((ret = __cpuidle_register_device(dev))) {
mutex_unlock(&cpuidle_lock);
return ret;
}
cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
@@ -278,6 +305,9 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
{
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
if (dev->registered == 0)
return;
cpuidle_pause_and_lock();
cpuidle_disable_device(dev);


@@ -1028,6 +1028,7 @@ endif
config BLK_DEV_HD_ONLY
bool "Old hard disk (MFM/RLL/IDE) driver"
depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN
help
There are two drivers for MFM/RLL/IDE hard disks. Most people use
the newer enhanced driver, but this old one is still around for two


@@ -42,6 +42,7 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
hw.io_ports.ctl_addr = aux + (6 * 0x20);
hw.irq = irq;
hw.chipset = ide_generic;
hwif = ide_find_port();
if (hwif == NULL)


@@ -49,6 +49,7 @@ static int __init ide_arm_init(void)
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, base, ctl);
hw.irq = IDE_ARM_IRQ;
hw.chipset = ide_generic;
hwif = ide_find_port();
if (hwif) {


@@ -409,9 +409,6 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
ide_device_add(idx, &palm_bk3710_port_info);
if (!hwif->present)
goto out;
return 0;
out:
printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");


@@ -22,6 +22,10 @@
#define DRV_NAME "ide_generic"
static int probe_mask = 0x03;
module_param(probe_mask, int, 0);
MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
static ssize_t store_add(struct class *cls, const char *buf, size_t n)
{
ide_hwif_t *hwif;
@@ -89,6 +93,9 @@ static int __init ide_generic_init(void)
u8 idx[MAX_HWIFS];
int i;
printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" module "
"parameter for probing all legacy ISA IDE ports\n");
for (i = 0; i < MAX_HWIFS; i++) {
ide_hwif_t *hwif;
unsigned long io_addr = ide_default_io_base(i);
@@ -96,7 +103,7 @@ static int __init ide_generic_init(void)
idx[i] = 0xff;
if (io_addr) {
if ((probe_mask & (1 << i)) && io_addr) {
if (!request_region(io_addr, 8, DRV_NAME)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
"not free.\n",
@@ -125,6 +132,7 @@ static int __init ide_generic_init(void)
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
hw.irq = ide_default_irq(io_addr);
hw.chipset = ide_generic;
ide_init_port_hw(hwif, &hw);
idx[i] = i;


@@ -55,6 +55,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, base, ctl);
hw.irq = pnp_irq(dev, 0);
hw.chipset = ide_generic;
hwif = ide_find_port();
if (hwif) {


@@ -1333,8 +1333,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
const struct ide_port_info *d)
{
if (d->chipset != ide_etrax100)
hwif->channel = port;
hwif->channel = port;
if (d->chipset)
hwif->chipset = d->chipset;
@@ -1519,7 +1518,7 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
continue;
}
if (d->chipset != ide_etrax100 && (i & 1) && mate) {
if ((i & 1) && mate) {
hwif->mate = mate;
mate->mate = hwif;
}
@@ -1665,6 +1664,7 @@ static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
ide_std_init_ports(hw, base, ctl);
hw->irq = irq;
hw->chipset = d->chipset;
hwif = ide_find_port_slot(d);
if (hwif) {


@@ -63,7 +63,6 @@ static int proc_ide_read_imodel
case ide_pmac: name = "mac-io"; break;
case ide_au1xxx: name = "au1xxx"; break;
case ide_palm3710: name = "palm3710"; break;
case ide_etrax100: name = "etrax100"; break;
case ide_acorn: name = "acorn"; break;
default: name = "(unknown)"; break;
}


@@ -138,6 +138,8 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
hw->chipset = ide_generic;
}
/*


@@ -81,6 +81,8 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
hw->irq = IRQ_MFP_IDE;
hw->ack_intr = NULL;
hw->chipset = ide_generic;
}
/*


@@ -16,6 +16,7 @@
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/module.h>
#include <asm/setup.h>
#include <asm/amigahw.h>
@@ -62,7 +63,10 @@
GAYLE_NUM_HWIFS-1)
#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
#define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000)
int ide_doubler = 0; /* support IDE doublers? */
EXPORT_SYMBOL_GPL(ide_doubler);
module_param_named(doubler, ide_doubler, bool, 0);
MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
#endif /* CONFIG_BLK_DEV_IDEDOUBLER */
@@ -112,6 +116,8 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = IRQ_AMIGA_PORTS;
hw->ack_intr = ack_intr;
hw->chipset = ide_generic;
}
/*


@@ -135,13 +135,17 @@ static void ide_detach(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
ide_hwif_t *hwif = info->hwif;
unsigned long data_addr, ctl_addr;
DEBUG(0, "ide_detach(0x%p)\n", link);
data_addr = hwif->io_ports.data_addr;
ctl_addr = hwif->io_ports.ctl_addr;
ide_release(link);
release_region(hwif->io_ports.ctl_addr, 1);
release_region(hwif->io_ports.data_addr, 8);
release_region(ctl_addr, 1);
release_region(data_addr, 8);
kfree(info);
} /* ide_detach */
@@ -194,6 +198,16 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
if (hwif->present)
return hwif;
/* retry registration in case device is still spinning up */
for (i = 0; i < 10; i++) {
msleep(100);
ide_port_scan(hwif);
if (hwif->present)
return hwif;
}
return hwif;
out_release:
release_region(ctl, 1);
release_region(io, 8);
@@ -222,7 +236,7 @@ static int ide_config(struct pcmcia_device *link)
cistpl_cftable_entry_t dflt;
} *stk = NULL;
cistpl_cftable_entry_t *cfg;
int i, pass, last_ret = 0, last_fn = 0, is_kme = 0;
int pass, last_ret = 0, last_fn = 0, is_kme = 0;
unsigned long io_base, ctl_base;
ide_hwif_t *hwif;
@@ -319,30 +333,15 @@ static int ide_config(struct pcmcia_device *link)
if (is_kme)
outb(0x81, ctl_base+1);
/* retry registration in case device is still spinning up */
for (i = 0; i < 10; i++) {
hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
if (hwif)
break;
if (link->io.NumPorts1 == 0x20) {
hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
if (hwif == NULL && link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
link->irq.AssignedIRQ, link);
if (hwif) {
io_base += 0x10;
ctl_base += 0x10;
break;
}
}
msleep(100);
}
if (hwif == NULL) {
printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx"
", irq %u failed\n", io_base, ctl_base,
link->irq.AssignedIRQ);
if (hwif == NULL)
goto failed;
}
info->ndev = 1;
sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);


@@ -78,6 +78,8 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
hw->irq = irq;
hw->ack_intr = ack_intr;
hw->chipset = ide_generic;
}
static const char *mac_ide_name[] =

Some files were not shown because too many files have changed in this diff.