Merge /spare/repo/linux-2.6/

Jeff Garzik 2005-09-16 02:46:15 -04:00
commit c5f977a0d2
80 changed files with 311 additions and 605 deletions


@ -146,6 +146,11 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
// no D+ pullup; lubbock can't connect/disconnect in software
};
static struct platform_device lub_audio_device = {
.name = "pxa2xx-ac97",
.id = -1,
};
static struct resource sa1111_resources[] = {
[0] = {
.start = 0x10000000,
@ -195,6 +200,7 @@ static struct platform_device smc91x_device = {
static struct platform_device *devices[] __initdata = {
&sa1111_device,
&lub_audio_device,
&smc91x_device,
};


@ -908,11 +908,6 @@ config IRQBALANCE
The default yes will allow the kernel to do irq load balancing.
Saying no will keep the kernel from doing irq load balancing.
config HAVE_DEC_LOCK
bool
depends on (SMP || PREEMPT) && X86_CMPXCHG
default y
# turning this on wastes a bunch of space.
# Summit needs it only when NUMA is on
config BOOT_IOREMAP


@ -7,7 +7,6 @@
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>
static int __init check_bridge(int vendor, int device)
{
@ -16,15 +15,6 @@ static int __init check_bridge(int vendor, int device)
if (vendor == PCI_VENDOR_ID_NVIDIA) {
acpi_skip_timer_override = 1;
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
* ATI IXP chipsets get double timer interrupts.
* For now just do this for all ATI chipsets.
* FIXME: this needs to be checked for the non ACPI case too.
*/
if (vendor == PCI_VENDOR_ID_ATI)
disable_timer_pin_1 = 1;
#endif
return 0;
}


@ -7,4 +7,3 @@ lib-y = checksum.o delay.o usercopy.o getuser.o putuser.o memcpy.o strstr.o \
bitops.o
lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o


@ -1,42 +0,0 @@
/*
* x86 version of "atomic_dec_and_lock()" using
* the atomic "cmpxchg" instruction.
*
* (For CPU's lacking cmpxchg, we use the slow
* generic version, and this one never even gets
* compiled).
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/atomic.h>
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
int counter;
int newcount;
repeat:
counter = atomic_read(atomic);
newcount = counter-1;
if (!newcount)
goto slow_path;
asm volatile("lock; cmpxchgl %1,%2"
:"=a" (newcount)
:"r" (newcount), "m" (atomic->counter), "0" (counter));
/* If the above failed, "eax" will have changed */
if (newcount != counter)
goto repeat;
return 0;
slow_path:
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);


@ -298,11 +298,6 @@ config PREEMPT
source "mm/Kconfig"
config HAVE_DEC_LOCK
bool
depends on (SMP || PREEMPT)
default y
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help


@ -82,17 +82,7 @@ unwcheck: vmlinux
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
archprepare: include/asm-ia64/.offsets.h.stamp
include/asm-ia64/.offsets.h.stamp:
mkdir -p include/asm-ia64
[ -s include/asm-ia64/asm-offsets.h ] \
|| echo "#define IA64_TASK_SIZE 0" > include/asm-ia64/asm-offsets.h
touch $@
CLEAN_FILES += vmlinux.gz bootloader include/asm-ia64/.offsets.h.stamp
CLEAN_FILES += vmlinux.gz bootloader
boot: lib/lib.a vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@


@ -216,12 +216,6 @@ ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
if (!mpnt)
return -ENOMEM;
if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
>> PAGE_SHIFT)) {
kmem_cache_free(vm_area_cachep, mpnt);
return -ENOMEM;
}
memset(mpnt, 0, sizeof(*mpnt));
down_write(&current->mm->mmap_sem);


@ -4,6 +4,7 @@
* to extract and format the required data.
*/
#define ASM_OFFSETS_C 1
#include <linux/config.h>
#include <linux/sched.h>


@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
lib-$(CONFIG_MD_RAID5) += xor.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED


@ -1,42 +0,0 @@
/*
* Copyright (C) 2003 Jerome Marchand, Bull S.A.
* Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
*
* This file is released under the GPLv2, or at your option any later version.
*
* ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction. This
* code is an adaptation of the x86 version of "atomic_dec_and_lock()".
*/
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
/*
* Decrement REFCOUNT and if the count reaches zero, acquire the spinlock. Both of these
* operations have to be done atomically, so that the count doesn't drop to zero without
* acquiring the spinlock first.
*/
int
_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
{
int old, new;
do {
old = atomic_read(refcount);
new = old - 1;
if (unlikely (old == 1)) {
/* oops, we may be decrementing to zero, do it the slow way... */
spin_lock(lock);
if (atomic_dec_and_test(refcount))
return 1;
spin_unlock(lock);
return 0;
}
} while (cmpxchg(&refcount->counter, old, new) != old);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);


@ -220,11 +220,6 @@ config PREEMPT
Say Y here if you are building a kernel for a desktop, embedded
or real-time system. Say N if you are unsure.
config HAVE_DEC_LOCK
bool
depends on (SMP || PREEMPT)
default n
config SMP
bool "Symmetric multi-processing support"
---help---


@ -1009,10 +1009,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config HAVE_DEC_LOCK
bool
default y
#
# Select some configuration options automatically based on user selections.
#


@ -581,18 +581,13 @@ asmlinkage int irix_brk(unsigned long brk)
}
/*
* Check if we have enough memory..
* Ok, looks good - let it rip.
*/
if (security_vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT)) {
if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) {
ret = -ENOMEM;
goto out;
}
/*
* Ok, looks good - let it rip.
*/
mm->brk = brk;
do_brk(oldbrk, newbrk-oldbrk);
ret = 0;
out:


@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
lib-y += csum_partial_copy.o dec_and_lock.o memcpy.o promlib.o \
lib-y += csum_partial_copy.o memcpy.o promlib.o \
strlen_user.o strncpy_user.o strnlen_user.o
obj-y += iomap.o


@ -1,47 +0,0 @@
/*
* MIPS version of atomic_dec_and_lock() using cmpxchg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* This implementation can be used on any architecture that
* has a cmpxchg, and where atomic->value is an int holding
* the value of the atomic (i.e. the high bits aren't used
* for a lock or anything like that).
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
int counter;
int newcount;
for (;;) {
counter = atomic_read(atomic);
newcount = counter - 1;
if (!newcount)
break; /* do it the slow way */
newcount = cmpxchg(&atomic->counter, counter, newcount);
if (newcount == counter)
return 0;
}
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);


@ -26,10 +26,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config HAVE_DEC_LOCK
bool
default y
config PPC
bool
default y


@ -2,7 +2,7 @@
# Makefile for ppc-specific library files..
#
obj-y := checksum.o string.o strcase.o dec_and_lock.o div64.o
obj-y := checksum.o string.o strcase.o div64.o
obj-$(CONFIG_8xx) += rheap.o
obj-$(CONFIG_CPM2) += rheap.o


@ -1,38 +0,0 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* This implementation can be used on any architecture that
* has a cmpxchg, and where atomic->value is an int holding
* the value of the atomic (i.e. the high bits aren't used
* for a lock or anything like that).
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
int counter;
int newcount;
for (;;) {
counter = atomic_read(atomic);
newcount = counter - 1;
if (!newcount)
break; /* do it the slow way */
newcount = cmpxchg(&atomic->counter, counter, newcount);
if (newcount == counter)
return 0;
}
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);


@ -28,10 +28,6 @@ config GENERIC_ISA_DMA
bool
default y
config HAVE_DEC_LOCK
bool
default y
config EARLY_PRINTK
bool
default y


@ -1649,7 +1649,7 @@ _GLOBAL(__secondary_start)
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
bne 98f
beq 98f /* branch if result is 0 */
mfspr r3,PVR
srwi r3,r3,16
cmpwi r3,0x37 /* SStar */
@ -1813,7 +1813,7 @@ _STATIC(start_here_multiplatform)
ld r3,0(r3)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
bne 98f
beq 98f /* branch if result is 0 */
mfspr r3,PVR
srwi r3,r3,16
cmpwi r3,0x37 /* SStar */
@ -1834,7 +1834,7 @@ _STATIC(start_here_multiplatform)
lwz r3,PLATFORM(r3) /* r3 = platform flags */
/* Test if bit 0 is set (LPAR bit) */
andi. r3,r3,PLATFORM_LPAR
bne 98f
bne 98f /* branch if result is !0 */
LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
sub r6,r6,r26
ld r6,0(r6) /* get the value of _SDR1 */


@ -224,10 +224,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma == NULL)
return -ENOMEM;
if (security_vm_enough_memory(vdso_pages)) {
kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
memset(vma, 0, sizeof(*vma));
/*
@ -237,8 +234,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
*/
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
if (vdso_base & ~PAGE_MASK)
if (vdso_base & ~PAGE_MASK) {
kmem_cache_free(vm_area_cachep, vma);
return (int)vdso_base;
}
current->thread.vdso_base = vdso_base;
@ -266,7 +265,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
vma->vm_ops = &vdso_vmops;
down_write(&mm->mmap_sem);
insert_vm_struct(mm, vma);
if (insert_vm_struct(mm, vma)) {
up_write(&mm->mmap_sem);
kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);


@ -2,7 +2,7 @@
# Makefile for ppc64-specific library files..
#
lib-y := checksum.o dec_and_lock.o string.o strcase.o
lib-y := checksum.o string.o strcase.o
lib-y += copypage.o memcpy.o copyuser.o usercopy.o
# Lock primitives are defined as no-ops in include/linux/spinlock.h


@ -1,47 +0,0 @@
/*
* ppc64 version of atomic_dec_and_lock() using cmpxchg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* This implementation can be used on any architecture that
* has a cmpxchg, and where atomic->value is an int holding
* the value of the atomic (i.e. the high bits aren't used
* for a lock or anything like that).
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
int counter;
int newcount;
for (;;) {
counter = atomic_read(atomic);
newcount = counter - 1;
if (!newcount)
break; /* do it the slow way */
newcount = cmpxchg(&atomic->counter, counter, newcount);
if (newcount == counter)
return 0;
}
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);


@ -33,14 +33,6 @@ config DEBUG_BOOTMEM
depends on DEBUG_KERNEL
bool "Debug BOOTMEM initialization"
# We have a custom atomic_dec_and_lock() implementation but it's not
# compatible with spinlock debugging so we need to fall back on
# the generic version in that case.
config HAVE_DEC_LOCK
bool
depends on SMP && !DEBUG_SPINLOCK
default y
config MCOUNT
bool
depends on STACK_DEBUG


@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);


@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
copy_in_user.o user_fixup.o memmove.o \
mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
obj-y += iomap.o


@ -1,80 +0,0 @@
/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
* dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
* using cas and ldstub instructions.
*
* Copyright (C) 2000 David S. Miller (davem@redhat.com)
*/
#include <linux/config.h>
#include <asm/thread_info.h>
.text
.align 64
/* CAS basically works like this:
*
* void CAS(MEM, REG1, REG2)
* {
* START_ATOMIC();
* if (*(MEM) == REG1) {
* TMP = *(MEM);
* *(MEM) = REG2;
* REG2 = TMP;
* } else
* REG2 = *(MEM);
* END_ATOMIC();
* }
*/
.globl _atomic_dec_and_lock
_atomic_dec_and_lock: /* %o0 = counter, %o1 = lock */
loop1: lduw [%o0], %g2
subcc %g2, 1, %g7
be,pn %icc, start_to_zero
nop
nzero: cas [%o0], %g2, %g7
cmp %g2, %g7
bne,pn %icc, loop1
mov 0, %g1
out:
membar #StoreLoad | #StoreStore
retl
mov %g1, %o0
start_to_zero:
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %g3
add %g3, 1, %g3
stw %g3, [%g6 + TI_PRE_COUNT]
#endif
to_zero:
ldstub [%o1], %g3
membar #StoreLoad | #StoreStore
brnz,pn %g3, spin_on_lock
nop
loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */
cmp %g2, %g7
be,pt %icc, out
mov 1, %g1
lduw [%o0], %g2
subcc %g2, 1, %g7
be,pn %icc, loop2
nop
membar #StoreStore | #LoadStore
stb %g0, [%o1]
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %g3
sub %g3, 1, %g3
stw %g3, [%g6 + TI_PRE_COUNT]
#endif
b,pt %xcc, nzero
nop
spin_on_lock:
ldub [%o1], %g3
membar #LoadLoad
brnz,pt %g3, spin_on_lock
nop
ba,pt %xcc, to_zero
nop
nop


@ -277,11 +277,6 @@ source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
config HAVE_DEC_LOCK
bool
depends on SMP
default y
config NR_CPUS
int "Maximum number of CPUs (2-256)"
range 2 256


@ -353,11 +353,6 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec
mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!mpnt)
return -ENOMEM;
if (security_vm_enough_memory((IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))>>PAGE_SHIFT)) {
kmem_cache_free(vm_area_cachep, mpnt);
return -ENOMEM;
}
memset(mpnt, 0, sizeof(*mpnt));


@ -52,17 +52,13 @@ int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma)
return -ENOMEM;
if (security_vm_enough_memory(npages)) {
kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
memset(vma, 0, sizeof(struct vm_area_struct));
/* Could randomize here */
vma->vm_start = VSYSCALL32_BASE;
vma->vm_end = VSYSCALL32_END;
/* MAYWRITE to allow gdb to COW and set breakpoints */
vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 7];
vma->vm_ops = &syscall32_vm_ops;


@ -299,15 +299,6 @@ void __init check_ioapic(void)
#endif
/* RED-PEN skip them on mptables too? */
return;
case PCI_VENDOR_ID_ATI:
/* All timer interrupts on atiixp
are doubled. Disable one. */
if (disable_timer_pin_1 == 0) {
disable_timer_pin_1 = 1;
printk(KERN_INFO
"ATI board detected. Disabling timer pin 1.\n");
}
return;
}
/* No multi-function device? */


@ -178,10 +178,6 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_HAVE_DEC_LOCK
EXPORT_SYMBOL(_atomic_dec_and_lock);
#endif
EXPORT_SYMBOL(die_chain);
EXPORT_SYMBOL(register_die_notifier);


@ -10,5 +10,3 @@ lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
usercopy.o getuser.o putuser.o \
thunk.o clear_page.o copy_page.o bitstr.o bitops.o
lib-y += memcpy.o memmove.o memset.o copy_user.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o


@ -1,40 +0,0 @@
/*
* x86 version of "atomic_dec_and_lock()" using
* the atomic "cmpxchg" instruction.
*
* (For CPU's lacking cmpxchg, we use the slow
* generic version, and this one never even gets
* compiled).
*/
#include <linux/spinlock.h>
#include <asm/atomic.h>
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
int counter;
int newcount;
repeat:
counter = atomic_read(atomic);
newcount = counter-1;
if (!newcount)
goto slow_path;
asm volatile("lock; cmpxchgl %1,%2"
:"=a" (newcount)
:"r" (newcount), "m" (atomic->counter), "0" (counter));
/* If the above failed, "eax" will have changed */
if (newcount != counter)
goto repeat;
return 0;
slow_path:
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}


@ -26,10 +26,6 @@ config RWSEM_XCHGADD_ALGORITHM
bool
default y
config HAVE_DEC_LOCK
bool
default y
config GENERIC_HARDIRQS
bool
default y


@ -839,9 +839,6 @@ int __init hvc_init(void)
hvc_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(hvc_driver, &hvc_ops);
if (tty_register_driver(hvc_driver))
panic("Couldn't register hvc console driver\n");
/* Always start the kthread because there can be hotplug vty adapters
* added later. */
hvc_task = kthread_run(khvcd, NULL, "khvcd");
@ -851,6 +848,9 @@ int __init hvc_init(void)
return -EIO;
}
if (tty_register_driver(hvc_driver))
panic("Couldn't register hvc console driver\n");
return 0;
}
module_init(hvc_init);


@ -810,13 +810,14 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
* from the top and bottom of cursor position
*/
old_origin += (vc->vc_y - new_rows/2) * old_row_size;
end = old_origin + new_screen_size;
end = old_origin + (old_row_size * new_rows);
}
} else
/*
* Cursor near the top, copy contents from the top of buffer
*/
end = (old_rows > new_rows) ? old_origin + new_screen_size :
end = (old_rows > new_rows) ? old_origin +
(old_row_size * new_rows) :
vc->vc_scr_end;
update_attr(vc);


@ -30,6 +30,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/hardware/arm_twd.h>
#include <asm/uaccess.h>
struct mpcore_wdt {


@ -914,19 +914,23 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
return ret;
}
static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm i2c_pxa_algorithm = {
.name = "PXA-I2C-Algorithm",
.id = I2C_ALGO_PXA,
.master_xfer = i2c_pxa_xfer,
.functionality = i2c_pxa_functionality,
};
static struct pxa_i2c i2c_pxa = {
.lock = SPIN_LOCK_UNLOCKED,
.wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_pxa.wait),
.adap = {
.name = "pxa2xx-i2c",
.id = I2C_ALGO_PXA,
.owner = THIS_MODULE,
.algo = &i2c_pxa_algorithm,
.name = "pxa2xx-i2c",
.retries = 5,
},
};


@ -611,7 +611,7 @@ static int sedlbauer_event(event_t event, int priority,
} /* sedlbauer_event */
static struct pcmcia_device_id sedlbauer_ids[] = {
PCMCIA_DEVICE_PROD_ID1234("SEDLBAUER", "speed star II", "V 3.1", "(c) 93 - 98 cb ", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a, 0x50d4149c),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe),


@ -309,17 +309,25 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
/* If we're in D3, force entire word to 0.
/* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
if (dev->current_state >= PCI_D3hot) {
if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
switch (dev->current_state) {
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = 1;
/* Fall-through: force to D0 */
case PCI_D3hot:
case PCI_D3cold:
case PCI_POWER_ERROR:
pmcsr = 0;
} else {
break;
default:
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
}
/* enter specified state */


@ -1045,7 +1045,18 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
{
struct yenta_socket *socket;
int ret;
/*
* If we failed to assign proper bus numbers for this cardbus
* controller during PCI probe, its subordinate pci_bus is NULL.
* Bail out if so.
*/
if (!dev->subordinate) {
printk(KERN_ERR "Yenta: no bus associated with %s! "
"(try 'pci=assign-busses')\n", pci_name(dev));
return -ENODEV;
}
socket = kmalloc(sizeof(struct yenta_socket), GFP_KERNEL);
if (!socket)
return -ENOMEM;


@ -161,7 +161,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
u32 val, val2;
u32 val, val2 = 0;
u8 pmr;
if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@ -289,7 +289,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (ent->device != 0x182) {
if ((pmr & SIS_PMR_COMBINED) == 0) {
printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n");
port2_start=0x64;
port2_start = 64;
}
else {
printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n");


@ -463,7 +463,7 @@ static int __init serial21285_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
extern struct uart_driver serial21285_reg;
static struct uart_driver serial21285_reg;
static struct console serial21285_console =
{


@ -689,7 +689,7 @@ static int __init pl010_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
extern struct uart_driver amba_reg;
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAM",
.write = pl010_console_write,


@ -701,7 +701,7 @@ static int __init pl011_console_setup(struct console *co, char *options)
return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
extern struct uart_driver amba_reg;
static struct uart_driver amba_reg;
static struct console amba_console = {
.name = "ttyAMA",
.write = pl011_console_write,


@ -525,7 +525,7 @@ static int __init clps711xuart_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
extern struct uart_driver clps711x_reg;
static struct uart_driver clps711x_reg;
static struct console clps711x_console = {
.name = "ttyCL",
.write = clps711xuart_console_write,


@ -589,8 +589,8 @@ serial_pxa_type(struct uart_port *port)
#ifdef CONFIG_SERIAL_PXA_CONSOLE
extern struct uart_pxa_port serial_pxa_ports[];
extern struct uart_driver serial_pxa_reg;
static struct uart_pxa_port serial_pxa_ports[];
static struct uart_driver serial_pxa_reg;
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)


@ -799,7 +799,7 @@ sa1100_console_setup(struct console *co, char *options)
return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
extern struct uart_driver sa1100_reg;
static struct uart_driver sa1100_reg;
static struct console sa1100_console = {
.name = "ttySA",
.write = sa1100_console_write,


@ -632,7 +632,7 @@ static int __init lh7a40xuart_console_setup (struct console* co, char* options)
return uart_set_options (port, co, baud, parity, bits, flow);
}
extern struct uart_driver lh7a40x_reg;
static struct uart_driver lh7a40x_reg;
static struct console lh7a40x_console = {
.name = "ttyAM",
.write = lh7a40xuart_console_write,


@ -1020,7 +1020,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height)
{
if (width % 2 || width > ORIG_VIDEO_COLS || height > ORIG_VIDEO_LINES)
if (width % 2 || width > ORIG_VIDEO_COLS ||
height > (ORIG_VIDEO_LINES * vga_default_font_height)/
c->vc_font.height)
return -EINVAL;
if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */


@ -209,10 +209,13 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
if (!edid && conn == 1) {
/* try to get from firmware */
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (edid)
memcpy(edid, fb_firmware_edid(info->device),
EDID_LENGTH);
const u8 *e = fb_firmware_edid(info->device);
if (e != NULL) {
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (edid)
memcpy(edid, e, EDID_LENGTH);
}
}
if (out_edid)


@ -274,10 +274,13 @@ int savagefb_probe_i2c_connector(struct fb_info *info, u8 **out_edid)
if (!edid) {
/* try to get from firmware */
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (edid)
memcpy(edid, fb_firmware_edid(info->device),
EDID_LENGTH);
const u8 *e = fb_firmware_edid(info->device);
if (e) {
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (edid)
memcpy(edid, e, EDID_LENGTH);
}
}
if (out_edid)


@ -60,8 +60,6 @@
#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) || (chip == S3_PROSAVAGEDDR))
/* Chip tags. These are used to group the adapters into
* related families.
*/
@ -74,8 +72,6 @@ typedef enum {
S3_PROSAVAGE,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
S3_PROSAVAGEDDR,
S3_TWISTER,
S3_LAST
} savage_chipset;


@ -1773,8 +1773,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
}
}
if (S3_SAVAGE_MOBILE_SERIES(par->chip) ||
(S3_MOBILE_TWISTER_SERIES(par->chip) && !par->crtonly))
if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly)
par->display_type = DISP_LCD;
else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi))
par->display_type = DISP_DFP;
@ -1783,7 +1782,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
/* Check LCD panel parrmation */
if (par->chip == S3_SAVAGE_MX) {
if (par->display_type == DISP_LCD) {
unsigned char cr6b = VGArCR( 0x6b );
int panelX = (VGArSEQ (0x61) +
@ -1922,15 +1921,15 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
snprintf (info->fix.id, 16, "ProSavageKM");
break;
case FB_ACCEL_S3TWISTER_P:
par->chip = S3_TWISTER;
par->chip = S3_PROSAVAGE;
snprintf (info->fix.id, 16, "TwisterP");
break;
case FB_ACCEL_S3TWISTER_K:
par->chip = S3_TWISTER;
par->chip = S3_PROSAVAGE;
snprintf (info->fix.id, 16, "TwisterK");
break;
case FB_ACCEL_PROSAVAGE_DDR:
par->chip = S3_PROSAVAGEDDR;
par->chip = S3_PROSAVAGE;
snprintf (info->fix.id, 16, "ProSavageDDR");
break;
case FB_ACCEL_PROSAVAGE_DDRK:


@ -44,6 +44,8 @@
#include <linux/nfsd/syscall.h>
#include <linux/personality.h>
#include <linux/rwsem.h>
#include <linux/acct.h>
#include <linux/mm.h>
#include <net/sock.h> /* siocdevprivate_ioctl */
@ -1487,6 +1489,8 @@ int compat_do_execve(char * filename,
/* execve success */
security_bprm_free(bprm);
acct_update_integrals(current);
update_mem_hiwater(current);
kfree(bprm);
return retval;
}


@ -421,11 +421,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
if (!mpnt)
return -ENOMEM;
if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
kmem_cache_free(vm_area_cachep, mpnt);
return -ENOMEM;
}
memset(mpnt, 0, sizeof(*mpnt));
down_write(&mm->mmap_sem);
@ -745,8 +740,8 @@ static inline int de_thread(struct task_struct *tsk)
}
/*
* Now there are really no other threads at all,
* so it's safe to stop telling them to kill themselves.
* There may be one thread left which is just exiting,
* but it's safe to stop telling the group to kill themselves.
*/
sig->flags = 0;
@ -785,7 +780,6 @@ no_thread_group:
kmem_cache_free(sighand_cachep, oldsighand);
}
BUG_ON(!thread_group_empty(current));
BUG_ON(!thread_group_leader(current));
return 0;
}


@ -69,13 +69,9 @@ void free_fd_array(struct file **array, int num)
static void __free_fdtable(struct fdtable *fdt)
{
int fdset_size, fdarray_size;
fdset_size = fdt->max_fdset / 8;
fdarray_size = fdt->max_fds * sizeof(struct file *);
free_fdset(fdt->open_fds, fdset_size);
free_fdset(fdt->close_on_exec, fdset_size);
free_fd_array(fdt->fd, fdarray_size);
free_fdset(fdt->open_fds, fdt->max_fdset);
free_fdset(fdt->close_on_exec, fdt->max_fdset);
free_fd_array(fdt->fd, fdt->max_fds);
kfree(fdt);
}


@ -0,0 +1,16 @@
#ifndef __ASM_HARDWARE_TWD_H
#define __ASM_HARDWARE_TWD_H
#define TWD_TIMER_LOAD 0x00
#define TWD_TIMER_COUNTER 0x04
#define TWD_TIMER_CONTROL 0x08
#define TWD_TIMER_INTSTAT 0x0C
#define TWD_WDOG_LOAD 0x20
#define TWD_WDOG_COUNTER 0x24
#define TWD_WDOG_CONTROL 0x28
#define TWD_WDOG_INTSTAT 0x2C
#define TWD_WDOG_RESETSTAT 0x30
#define TWD_WDOG_DISABLE 0x34
#endif


@ -57,7 +57,9 @@
#include <linux/config.h>
#include <asm/fpu.h>
#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
/*
* Base-2 logarithm of number of pages to allocate per task structure


@ -5,7 +5,9 @@
#ifndef _ASM_IA64_THREAD_INFO_H
#define _ASM_IA64_THREAD_INFO_H
#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
#include <asm/processor.h>
#include <asm/ptrace.h>
@ -51,9 +53,14 @@ struct thread_info {
}, \
}
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
#define alloc_thread_info(tsk) ((struct thread_info *) 0)
#endif
#define free_thread_info(ti) /* nothing */
#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR


@ -72,7 +72,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
static inline void flush_tlb_range(struct mm_struct *mm,
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
static inline void flush_tlb_kernel_range(unsigned long start,


@ -20,6 +20,7 @@
#define NETLINK_IP6_FW 13
#define NETLINK_DNRTMSG 14 /* DECnet routing messages */
#define NETLINK_KOBJECT_UEVENT 15 /* Kernel messages to userspace */
#define NETLINK_GENERIC 16
#define MAX_LINKS 32


@ -1355,7 +1355,7 @@
#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0
#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1
#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2
#define PCI_DEVICE_IDRME__DIGI96_8_PAD_OR_PST 0x3fc3
#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3
#define PCI_DEVICE_ID_XILINX_HAMMERFALL 0x3fc4
#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5
#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6


@ -84,6 +84,7 @@
#define IP_VS_CONN_F_IN_SEQ 0x0400 /* must do input seq adjust */
#define IP_VS_CONN_F_SEQ_MASK 0x0600 /* in/out sequence mask */
#define IP_VS_CONN_F_NO_CPORT 0x0800 /* no client port set yet */
#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */
/* Move it to better place one day, for now keep it unique */
#define NFC_IPVS_PROPERTY 0x10000
@ -739,6 +740,8 @@ enum {
extern struct ip_vs_conn *ip_vs_conn_in_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
extern struct ip_vs_conn *ip_vs_ct_in_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);
extern struct ip_vs_conn *ip_vs_conn_out_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port);


@ -1,7 +1,41 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>
#ifdef __HAVE_ARCH_CMPXCHG
/*
* This is an implementation of the notion of "decrement a
* reference count, and return locked if it decremented to zero".
*
* This implementation can be used on any architecture that
* has a cmpxchg, and where atomic->value is an int holding
* the value of the atomic (i.e. the high bits aren't used
* for a lock or anything like that).
*/
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
int counter;
int newcount;
for (;;) {
counter = atomic_read(atomic);
newcount = counter - 1;
if (!newcount)
break; /* do it the slow way */
newcount = cmpxchg(&atomic->counter, counter, newcount);
if (newcount == counter)
return 0;
}
spin_lock(lock);
if (atomic_dec_and_test(atomic))
return 1;
spin_unlock(lock);
return 0;
}
#else
/*
* This is an architecture-neutral, but slow,
* implementation of the notion of "decrement
@ -33,5 +67,6 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
spin_unlock(lock);
return 0;
}
#endif
EXPORT_SYMBOL(_atomic_dec_and_lock);


@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
return -ENOMEM;
if ((vma->vm_flags & VM_ACCOUNT) &&
security_vm_enough_memory(vma_pages(vma)))
return -ENOMEM;
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}


@ -659,7 +659,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
* kmem_cache_create(), or __kmalloc(), before
* the generic caches are initialized.
*/
BUG_ON(csizep->cs_cachep == NULL);
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
while (size > csizep->cs_size)
csizep++;


@ -214,9 +214,11 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
.tos = RT_TOS(iph->tos)} }, .proto = 0};
if (!ip_route_output_key(&rt, &fl)) {
/* Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (((struct dst_entry *)rt)->dev == dev) {
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding.
* - Deal with redirected traffic. */
if (((struct dst_entry *)rt)->dev == dev ||
rt->rt_type == RTN_LOCAL) {
skb->dst = (struct dst_entry *)rt;
goto bridged_dnat;
}


@ -1603,7 +1603,7 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
}
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
pmc->sfcount[MCAST_EXCLUDE] = 0;
pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
}


@ -196,6 +196,7 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (s_addr==cp->caddr && s_port==cp->cport &&
d_port==cp->vport && d_addr==cp->vaddr &&
((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
protocol==cp->protocol) {
/* HIT */
atomic_inc(&cp->refcnt);
@ -227,6 +228,40 @@ struct ip_vs_conn *ip_vs_conn_in_get
return cp;
}
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get
(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port)
{
unsigned hash;
struct ip_vs_conn *cp;
hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);
ct_read_lock(hash);
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (s_addr==cp->caddr && s_port==cp->cport &&
d_port==cp->vport && d_addr==cp->vaddr &&
cp->flags & IP_VS_CONN_F_TEMPLATE &&
protocol==cp->protocol) {
/* HIT */
atomic_inc(&cp->refcnt);
goto out;
}
}
cp = NULL;
out:
ct_read_unlock(hash);
IP_VS_DBG(7, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
ip_vs_proto_name(protocol),
NIPQUAD(s_addr), ntohs(s_port),
NIPQUAD(d_addr), ntohs(d_port),
cp?"hit":"not hit");
return cp;
}
/*
* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
@ -367,7 +402,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
atomic_read(&dest->refcnt));
/* Update the connection counters */
if (cp->cport || (cp->flags & IP_VS_CONN_F_NO_CPORT)) {
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so increase the inactive
connection counter because it is in TCP SYNRECV
state (inactive) or other protocol inacive state */
@ -406,7 +441,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
atomic_read(&dest->refcnt));
/* Update the connection counters */
if (cp->cport || (cp->flags & IP_VS_CONN_F_NO_CPORT)) {
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so decrease the inactconns
or activeconns counter */
if (cp->flags & IP_VS_CONN_F_INACTIVE) {
@ -467,7 +502,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
/*
* Invalidate the connection template
*/
if (ct->cport) {
if (ct->vport != 65535) {
if (ip_vs_conn_unhash(ct)) {
ct->dport = 65535;
ct->vport = 65535;
@ -776,7 +811,7 @@ void ip_vs_random_dropentry(void)
ct_write_lock_bh(hash);
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (!cp->cport && !(cp->flags & IP_VS_CONN_F_NO_CPORT))
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;


@ -243,10 +243,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
if (ports[1] == svc->port) {
/* Check if a template already exists */
if (svc->port != FTPPORT)
ct = ip_vs_conn_in_get(iph->protocol, snet, 0,
ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
iph->daddr, ports[1]);
else
ct = ip_vs_conn_in_get(iph->protocol, snet, 0,
ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
iph->daddr, 0);
if (!ct || !ip_vs_check_template(ct)) {
@ -272,14 +272,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
iph->daddr,
ports[1],
dest->addr, dest->port,
0,
IP_VS_CONN_F_TEMPLATE,
dest);
else
ct = ip_vs_conn_new(iph->protocol,
snet, 0,
iph->daddr, 0,
dest->addr, 0,
0,
IP_VS_CONN_F_TEMPLATE,
dest);
if (ct == NULL)
return NULL;
@ -298,10 +298,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* port zero template: <protocol,caddr,0,vaddr,0,daddr,0>
*/
if (svc->fwmark)
ct = ip_vs_conn_in_get(IPPROTO_IP, snet, 0,
ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0,
htonl(svc->fwmark), 0);
else
ct = ip_vs_conn_in_get(iph->protocol, snet, 0,
ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
iph->daddr, 0);
if (!ct || !ip_vs_check_template(ct)) {
@ -326,14 +326,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
snet, 0,
htonl(svc->fwmark), 0,
dest->addr, 0,
0,
IP_VS_CONN_F_TEMPLATE,
dest);
else
ct = ip_vs_conn_new(iph->protocol,
snet, 0,
iph->daddr, 0,
dest->addr, 0,
0,
IP_VS_CONN_F_TEMPLATE,
dest);
if (ct == NULL)
return NULL;


@ -297,16 +297,24 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
for (i=0; i<m->nr_conns; i++) {
unsigned flags;
s = (struct ip_vs_sync_conn *)p;
cp = ip_vs_conn_in_get(s->protocol,
s->caddr, s->cport,
s->vaddr, s->vport);
flags = ntohs(s->flags);
if (!(flags & IP_VS_CONN_F_TEMPLATE))
cp = ip_vs_conn_in_get(s->protocol,
s->caddr, s->cport,
s->vaddr, s->vport);
else
cp = ip_vs_ct_in_get(s->protocol,
s->caddr, s->cport,
s->vaddr, s->vport);
if (!cp) {
cp = ip_vs_conn_new(s->protocol,
s->caddr, s->cport,
s->vaddr, s->vport,
s->daddr, s->dport,
ntohs(s->flags), NULL);
flags, NULL);
if (!cp) {
IP_VS_ERR("ip_vs_conn_new failed\n");
return;
@ -315,11 +323,11 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
} else if (!cp->dest) {
/* it is an entry created by the synchronization */
cp->state = ntohs(s->state);
cp->flags = ntohs(s->flags) | IP_VS_CONN_F_HASHED;
cp->flags = flags | IP_VS_CONN_F_HASHED;
} /* Note that we don't touch its state and flags
if it is a normal entry. */
if (ntohs(s->flags) & IP_VS_CONN_F_SEQ_MASK) {
if (flags & IP_VS_CONN_F_SEQ_MASK) {
opt = (struct ip_vs_sync_conn_options *)&s[1];
memcpy(&cp->in_seq, opt, sizeof(*opt));
p += FULL_CONN_SIZE;


@ -979,14 +979,19 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (!before(TCP_SKB_CB(skb)->seq, end_seq))
break;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
!before(end_seq, TCP_SKB_CB(skb)->end_seq);
pcount = tcp_skb_pcount(skb);
if (pcount > 1 &&
(after(start_seq, TCP_SKB_CB(skb)->seq) ||
before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
if (pcount > 1 && !in_sack &&
after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
unsigned int pkt_len;
if (after(start_seq, TCP_SKB_CB(skb)->seq))
in_sack = !after(start_seq,
TCP_SKB_CB(skb)->seq);
if (!in_sack)
pkt_len = (start_seq -
TCP_SKB_CB(skb)->seq);
else
@ -999,9 +1004,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
fack_count += pcount;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
!before(end_seq, TCP_SKB_CB(skb)->end_seq);
sacked = TCP_SKB_CB(skb)->sacked;
/* Account D-SACK for retransmitted packet. */


@ -435,6 +435,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
int nsize, old_factor;
u16 flags;
BUG_ON(len >= skb->len);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;


@ -1968,7 +1968,7 @@ static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
}
pmc->mca_sources = NULL;
pmc->mca_sfmode = MCAST_EXCLUDE;
pmc->mca_sfcount[MCAST_EXCLUDE] = 0;
pmc->mca_sfcount[MCAST_INCLUDE] = 0;
pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}


@ -405,9 +405,8 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
continue;
if (!ipv6_addr_any(&np->rcv_saddr)) {
if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
return s;
continue;
if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
continue;
}
if(!inet6_mc_check(s, loc_addr, rmt_addr))
continue;


@ -228,11 +228,11 @@ typedef struct snd_rme32 {
} rme32_t;
static struct pci_device_id snd_rme32_ids[] = {
{PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_DIGI32,
{PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
{PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_DIGI32_8,
{PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
{PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_DIGI32_PRO,
{PCI_VENDOR_ID_XILINX_RME, PCI_DEVICE_ID_RME_DIGI32_PRO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
{0,}
};
@ -240,7 +240,7 @@ static struct pci_device_id snd_rme32_ids[] = {
MODULE_DEVICE_TABLE(pci, snd_rme32_ids);
#define RME32_ISWORKING(rme32) ((rme32)->wcreg & RME32_WCR_START)
#define RME32_PRO_WITH_8414(rme32) ((rme32)->pci->device == PCI_DEVICE_ID_DIGI32_PRO && (rme32)->rev == RME32_PRO_REVISION_WITH_8414)
#define RME32_PRO_WITH_8414(rme32) ((rme32)->pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO && (rme32)->rev == RME32_PRO_REVISION_WITH_8414)
static int snd_rme32_playback_prepare(snd_pcm_substream_t * substream);
@ -527,21 +527,21 @@ static int snd_rme32_playback_setrate(rme32_t * rme32, int rate)
RME32_WCR_FREQ_1;
break;
case 64000:
if (rme32->pci->device != PCI_DEVICE_ID_DIGI32_PRO)
if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO)
return -EINVAL;
rme32->wcreg |= RME32_WCR_DS_BM;
rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) &
~RME32_WCR_FREQ_1;
break;
case 88200:
if (rme32->pci->device != PCI_DEVICE_ID_DIGI32_PRO)
if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO)
return -EINVAL;
rme32->wcreg |= RME32_WCR_DS_BM;
rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_1) &
~RME32_WCR_FREQ_0;
break;
case 96000:
if (rme32->pci->device != PCI_DEVICE_ID_DIGI32_PRO)
if (rme32->pci->device != PCI_DEVICE_ID_RME_DIGI32_PRO)
return -EINVAL;
rme32->wcreg |= RME32_WCR_DS_BM;
rme32->wcreg = (rme32->wcreg | RME32_WCR_FREQ_0) |
@ -881,7 +881,7 @@ static int snd_rme32_playback_spdif_open(snd_pcm_substream_t * substream)
runtime->hw = snd_rme32_spdif_fd_info;
else
runtime->hw = snd_rme32_spdif_info;
if (rme32->pci->device == PCI_DEVICE_ID_DIGI32_PRO) {
if (rme32->pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO) {
runtime->hw.rates |= SNDRV_PCM_RATE_64000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000;
runtime->hw.rate_max = 96000;
}
@ -1408,8 +1408,8 @@ static int __devinit snd_rme32_create(rme32_t * rme32)
}
/* set up ALSA pcm device for ADAT */
if ((pci->device == PCI_DEVICE_ID_DIGI32) ||
(pci->device == PCI_DEVICE_ID_DIGI32_PRO)) {
if ((pci->device == PCI_DEVICE_ID_RME_DIGI32) ||
(pci->device == PCI_DEVICE_ID_RME_DIGI32_PRO)) {
/* ADAT is not available on DIGI32 and DIGI32 Pro */
rme32->adat_pcm = NULL;
}
@ -1639,11 +1639,11 @@ snd_rme32_info_inputtype_control(snd_kcontrol_t * kcontrol,
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
switch (rme32->pci->device) {
case PCI_DEVICE_ID_DIGI32:
case PCI_DEVICE_ID_DIGI32_8:
case PCI_DEVICE_ID_RME_DIGI32:
case PCI_DEVICE_ID_RME_DIGI32_8:
uinfo->value.enumerated.items = 3;
break;
case PCI_DEVICE_ID_DIGI32_PRO:
case PCI_DEVICE_ID_RME_DIGI32_PRO:
uinfo->value.enumerated.items = 4;
break;
default:
@ -1670,11 +1670,11 @@ snd_rme32_get_inputtype_control(snd_kcontrol_t * kcontrol,
ucontrol->value.enumerated.item[0] = snd_rme32_getinputtype(rme32);
switch (rme32->pci->device) {
case PCI_DEVICE_ID_DIGI32:
case PCI_DEVICE_ID_DIGI32_8:
case PCI_DEVICE_ID_RME_DIGI32:
case PCI_DEVICE_ID_RME_DIGI32_8:
items = 3;
break;
case PCI_DEVICE_ID_DIGI32_PRO:
case PCI_DEVICE_ID_RME_DIGI32_PRO:
items = 4;
break;
default:
@ -1697,11 +1697,11 @@ snd_rme32_put_inputtype_control(snd_kcontrol_t * kcontrol,
int change, items = 3;
switch (rme32->pci->device) {
case PCI_DEVICE_ID_DIGI32:
case PCI_DEVICE_ID_DIGI32_8:
case PCI_DEVICE_ID_RME_DIGI32:
case PCI_DEVICE_ID_RME_DIGI32_8:
items = 3;
break;
case PCI_DEVICE_ID_DIGI32_PRO:
case PCI_DEVICE_ID_RME_DIGI32_PRO:
items = 4;
break;
default:
@ -1982,13 +1982,13 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
strcpy(card->driver, "Digi32");
switch (rme32->pci->device) {
case PCI_DEVICE_ID_DIGI32:
case PCI_DEVICE_ID_RME_DIGI32:
strcpy(card->shortname, "RME Digi32");
break;
case PCI_DEVICE_ID_DIGI32_8:
case PCI_DEVICE_ID_RME_DIGI32_8:
strcpy(card->shortname, "RME Digi32/8");
break;
case PCI_DEVICE_ID_DIGI32_PRO:
case PCI_DEVICE_ID_RME_DIGI32_PRO:
strcpy(card->shortname, "RME Digi32 PRO");
break;
}


@ -233,13 +233,13 @@ typedef struct snd_rme96 {
} rme96_t;
static struct pci_device_id snd_rme96_ids[] = {
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96,
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96_8,
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96_8_PRO,
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PRO,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST,
{ PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
{ 0, }
};
@ -248,12 +248,12 @@ MODULE_DEVICE_TABLE(pci, snd_rme96_ids);
#define RME96_ISPLAYING(rme96) ((rme96)->wcreg & RME96_WCR_START)
#define RME96_ISRECORDING(rme96) ((rme96)->wcreg & RME96_WCR_START_2)
#define RME96_HAS_ANALOG_IN(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST)
#define RME96_HAS_ANALOG_OUT(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PRO || \
(rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST)
#define RME96_HAS_ANALOG_IN(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST)
#define RME96_HAS_ANALOG_OUT(rme96) ((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PRO || \
(rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST)
#define RME96_DAC_IS_1852(rme96) (RME96_HAS_ANALOG_OUT(rme96) && (rme96)->rev >= 4)
#define RME96_DAC_IS_1855(rme96) (((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST && (rme96)->rev < 4) || \
((rme96)->pci->device == PCI_DEVICE_ID_DIGI96_8_PRO && (rme96)->rev == 2))
#define RME96_DAC_IS_1855(rme96) (((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && (rme96)->rev < 4) || \
((rme96)->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PRO && (rme96)->rev == 2))
#define RME96_185X_MAX_OUT(rme96) ((1 << (RME96_DAC_IS_1852(rme96) ? RME96_AD1852_VOL_BITS : RME96_AD1855_VOL_BITS)) - 1)
static int
@ -830,9 +830,9 @@ snd_rme96_setinputtype(rme96_t *rme96,
RME96_WCR_INP_1;
break;
case RME96_INPUT_XLR:
if ((rme96->pci->device != PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST &&
rme96->pci->device != PCI_DEVICE_ID_DIGI96_8_PRO) ||
(rme96->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST &&
if ((rme96->pci->device != PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST &&
rme96->pci->device != PCI_DEVICE_ID_RME_DIGI96_8_PRO) ||
(rme96->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST &&
rme96->rev > 4))
{
/* Only Digi96/8 PRO and Digi96/8 PAD supports XLR */
@ -1598,7 +1598,7 @@ snd_rme96_create(rme96_t *rme96)
rme96->spdif_pcm->info_flags = 0;
/* set up ALSA pcm device for ADAT */
if (pci->device == PCI_DEVICE_ID_DIGI96) {
if (pci->device == PCI_DEVICE_ID_RME_DIGI96) {
/* ADAT is not available on the base model */
rme96->adat_pcm = NULL;
} else {
@ -1858,14 +1858,14 @@ snd_rme96_info_inputtype_control(snd_kcontrol_t *kcontrol, snd_ctl_elem_info_t *
uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
uinfo->count = 1;
switch (rme96->pci->device) {
case PCI_DEVICE_ID_DIGI96:
case PCI_DEVICE_ID_DIGI96_8:
case PCI_DEVICE_ID_RME_DIGI96:
case PCI_DEVICE_ID_RME_DIGI96_8:
uinfo->value.enumerated.items = 3;
break;
case PCI_DEVICE_ID_DIGI96_8_PRO:
case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
uinfo->value.enumerated.items = 4;
break;
case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST:
case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
if (rme96->rev > 4) {
/* PST */
uinfo->value.enumerated.items = 4;
@ -1895,14 +1895,14 @@ snd_rme96_get_inputtype_control(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
ucontrol->value.enumerated.item[0] = snd_rme96_getinputtype(rme96);
switch (rme96->pci->device) {
case PCI_DEVICE_ID_DIGI96:
case PCI_DEVICE_ID_DIGI96_8:
case PCI_DEVICE_ID_RME_DIGI96:
case PCI_DEVICE_ID_RME_DIGI96_8:
items = 3;
break;
case PCI_DEVICE_ID_DIGI96_8_PRO:
case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
items = 4;
break;
case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST:
case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
if (rme96->rev > 4) {
/* for handling PST case, (INPUT_ANALOG is moved to INPUT_XLR */
if (ucontrol->value.enumerated.item[0] == RME96_INPUT_ANALOG) {
@ -1932,14 +1932,14 @@ snd_rme96_put_inputtype_control(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
int change, items = 3;
switch (rme96->pci->device) {
case PCI_DEVICE_ID_DIGI96:
case PCI_DEVICE_ID_DIGI96_8:
case PCI_DEVICE_ID_RME_DIGI96:
case PCI_DEVICE_ID_RME_DIGI96_8:
items = 3;
break;
case PCI_DEVICE_ID_DIGI96_8_PRO:
case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
items = 4;
break;
case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST:
case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
if (rme96->rev > 4) {
items = 4;
} else {
@ -1953,7 +1953,7 @@ snd_rme96_put_inputtype_control(snd_kcontrol_t * kcontrol, snd_ctl_elem_value_t
val = ucontrol->value.enumerated.item[0] % items;
/* special case for PST */
if (rme96->pci->device == PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST && rme96->rev > 4) {
if (rme96->pci->device == PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST && rme96->rev > 4) {
if (val == RME96_INPUT_XLR) {
val = RME96_INPUT_ANALOG;
}
@ -2375,16 +2375,16 @@ snd_rme96_probe(struct pci_dev *pci,
strcpy(card->driver, "Digi96");
switch (rme96->pci->device) {
case PCI_DEVICE_ID_DIGI96:
case PCI_DEVICE_ID_RME_DIGI96:
strcpy(card->shortname, "RME Digi96");
break;
case PCI_DEVICE_ID_DIGI96_8:
case PCI_DEVICE_ID_RME_DIGI96_8:
strcpy(card->shortname, "RME Digi96/8");
break;
case PCI_DEVICE_ID_DIGI96_8_PRO:
case PCI_DEVICE_ID_RME_DIGI96_8_PRO:
strcpy(card->shortname, "RME Digi96/8 PRO");
break;
case PCI_DEVICE_ID_DIGI96_8_PAD_OR_PST:
case PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST:
pci_read_config_byte(rme96->pci, 8, &val);
if (val < 5) {
strcpy(card->shortname, "RME Digi96/8 PAD");