Merge git://git.kernel.org/pub/scm/linux/kernel/git/bunk/trivial

* git://git.kernel.org/pub/scm/linux/kernel/git/bunk/trivial: (48 commits)
  Documentation: fix minor kernel-doc warnings
  BUG_ON() Conversion in drivers/net/
  BUG_ON() Conversion in drivers/s390/net/lcs.c
  BUG_ON() Conversion in mm/slab.c
  BUG_ON() Conversion in mm/highmem.c
  BUG_ON() Conversion in kernel/signal.c
  BUG_ON() Conversion in kernel/signal.c
  BUG_ON() Conversion in kernel/ptrace.c
  BUG_ON() Conversion in ipc/shm.c
  BUG_ON() Conversion in fs/freevxfs/
  BUG_ON() Conversion in fs/udf/
  BUG_ON() Conversion in fs/sysv/
  BUG_ON() Conversion in fs/inode.c
  BUG_ON() Conversion in fs/fcntl.c
  BUG_ON() Conversion in fs/dquot.c
  BUG_ON() Conversion in md/raid10.c
  BUG_ON() Conversion in md/raid6main.c
  BUG_ON() Conversion in md/raid5.c
  Fix minor documentation typo
  BFP->BPF in Documentation/networking/tuntap.txt
  ...
This commit is contained in:
Linus Torvalds 2006-04-02 12:58:45 -07:00
commit 63589ed078
68 changed files with 260 additions and 367 deletions

View file

@@ -2,7 +2,7 @@
# This makefile is used to generate the kernel documentation, # This makefile is used to generate the kernel documentation,
# primarily based on in-line comments in various source files. # primarily based on in-line comments in various source files.
# See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how # See Documentation/kernel-doc-nano-HOWTO.txt for instruction in how
# to ducument the SRC - and how to read it. # to document the SRC - and how to read it.
# To add a new book the only step required is to add the book to the # To add a new book the only step required is to add the book to the
# list of DOCBOOKS. # list of DOCBOOKS.

View file

@@ -322,7 +322,6 @@ X!Earch/i386/kernel/mca.c
<chapter id="sysfs"> <chapter id="sysfs">
<title>The Filesystem for Exporting Kernel Objects</title> <title>The Filesystem for Exporting Kernel Objects</title>
!Efs/sysfs/file.c !Efs/sysfs/file.c
!Efs/sysfs/dir.c
!Efs/sysfs/symlink.c !Efs/sysfs/symlink.c
!Efs/sysfs/bin.c !Efs/sysfs/bin.c
</chapter> </chapter>

View file

@@ -30,7 +30,7 @@ specific hotkey(event))
echo "event_num:event_type:event_argument" > echo "event_num:event_type:event_argument" >
/proc/acpi/hotkey/action. /proc/acpi/hotkey/action.
The result of the execution of this aml method is The result of the execution of this aml method is
attached to /proc/acpi/hotkey/poll_method, which is dnyamically attached to /proc/acpi/hotkey/poll_method, which is dynamically
created. Please use command "cat /proc/acpi/hotkey/polling_method" created. Please use command "cat /proc/acpi/hotkey/polling_method"
to retrieve it. to retrieve it.

View file

@@ -2,16 +2,18 @@
INTERNAL KERNEL ABI FOR FR-V ARCH INTERNAL KERNEL ABI FOR FR-V ARCH
================================= =================================
The internal FRV kernel ABI is not quite the same as the userspace ABI. A number of the registers The internal FRV kernel ABI is not quite the same as the userspace ABI. A
are used for special purposed, and the ABI is not consistent between modules vs core, and MMU vs number of the registers are used for special purposed, and the ABI is not
no-MMU. consistent between modules vs core, and MMU vs no-MMU.
This partly stems from the fact that FRV CPUs do not have a separate supervisor stack pointer, and This partly stems from the fact that FRV CPUs do not have a separate
most of them do not have any scratch registers, thus requiring at least one general purpose supervisor stack pointer, and most of them do not have any scratch
register to be clobbered in such an event. Also, within the kernel core, it is possible to simply registers, thus requiring at least one general purpose register to be
jump or call directly between functions using a relative offset. This cannot be extended to modules clobbered in such an event. Also, within the kernel core, it is possible to
for the displacement is likely to be too far. Thus in modules the address of a function to call simply jump or call directly between functions using a relative offset.
must be calculated in a register and then used, requiring two extra instructions. This cannot be extended to modules for the displacement is likely to be too
far. Thus in modules the address of a function to call must be calculated
in a register and then used, requiring two extra instructions.
This document has the following sections: This document has the following sections:
@@ -39,7 +41,8 @@ When a system call is made, the following registers are effective:
CPU OPERATING MODES CPU OPERATING MODES
=================== ===================
The FR-V CPU has three basic operating modes. In order of increasing capability: The FR-V CPU has three basic operating modes. In order of increasing
capability:
(1) User mode. (1) User mode.
@@ -47,42 +50,46 @@ The FR-V CPU has three basic operating modes. In order of increasing capability:
(2) Kernel mode. (2) Kernel mode.
Normal kernel mode. There are many additional control registers available that may be Normal kernel mode. There are many additional control registers
accessed in this mode, in addition to all the stuff available to user mode. This has two available that may be accessed in this mode, in addition to all the
submodes: stuff available to user mode. This has two submodes:
(a) Exceptions enabled (PSR.T == 1). (a) Exceptions enabled (PSR.T == 1).
Exceptions will invoke the appropriate normal kernel mode handler. On entry to the Exceptions will invoke the appropriate normal kernel mode
handler, the PSR.T bit will be cleared. handler. On entry to the handler, the PSR.T bit will be cleared.
(b) Exceptions disabled (PSR.T == 0). (b) Exceptions disabled (PSR.T == 0).
No exceptions or interrupts may happen. Any mandatory exceptions will cause the CPU to No exceptions or interrupts may happen. Any mandatory exceptions
halt unless the CPU is told to jump into debug mode instead. will cause the CPU to halt unless the CPU is told to jump into
debug mode instead.
(3) Debug mode. (3) Debug mode.
No exceptions may happen in this mode. Memory protection and management exceptions will be No exceptions may happen in this mode. Memory protection and
flagged for later consideration, but the exception handler won't be invoked. Debugging traps management exceptions will be flagged for later consideration, but
such as hardware breakpoints and watchpoints will be ignored. This mode is entered only by the exception handler won't be invoked. Debugging traps such as
debugging events obtained from the other two modes. hardware breakpoints and watchpoints will be ignored. This mode is
entered only by debugging events obtained from the other two modes.
All kernel mode registers may be accessed, plus a few extra debugging specific registers. All kernel mode registers may be accessed, plus a few extra debugging
specific registers.
================================= =================================
INTERNAL KERNEL-MODE REGISTER ABI INTERNAL KERNEL-MODE REGISTER ABI
================================= =================================
There are a number of permanent register assignments that are set up by entry.S in the exception There are a number of permanent register assignments that are set up by
prologue. Note that there is a complete set of exception prologues for each of user->kernel entry.S in the exception prologue. Note that there is a complete set of
transition and kernel->kernel transition. There are also user->debug and kernel->debug mode exception prologues for each of user->kernel transition and kernel->kernel
transition prologues. transition. There are also user->debug and kernel->debug mode transition
prologues.
REGISTER FLAVOUR USE REGISTER FLAVOUR USE
=============== ======= ==================================================== =============== ======= ==============================================
GR1 Supervisor stack pointer GR1 Supervisor stack pointer
GR15 Current thread info pointer GR15 Current thread info pointer
GR16 GP-Rel base register for small data GR16 GP-Rel base register for small data
@@ -92,10 +99,12 @@ transition prologues.
GR31 NOMMU Destroyed by debug mode entry GR31 NOMMU Destroyed by debug mode entry
GR31 MMU Destroyed by TLB miss kernel mode entry GR31 MMU Destroyed by TLB miss kernel mode entry
CCR.ICC2 Virtual interrupt disablement tracking CCR.ICC2 Virtual interrupt disablement tracking
CCCR.CC3 Cleared by exception prologue (atomic op emulation) CCCR.CC3 Cleared by exception prologue
(atomic op emulation)
SCR0 MMU See mmu-layout.txt. SCR0 MMU See mmu-layout.txt.
SCR1 MMU See mmu-layout.txt. SCR1 MMU See mmu-layout.txt.
SCR2 MMU Save for EAR0 (destroyed by icache insns in debug mode) SCR2 MMU Save for EAR0 (destroyed by icache insns
in debug mode)
SCR3 MMU Save for GR31 during debug exceptions SCR3 MMU Save for GR31 during debug exceptions
DAMR/IAMR NOMMU Fixed memory protection layout. DAMR/IAMR NOMMU Fixed memory protection layout.
DAMR/IAMR MMU See mmu-layout.txt. DAMR/IAMR MMU See mmu-layout.txt.
@@ -104,18 +113,21 @@ transition prologues.
Certain registers are also used or modified across function calls: Certain registers are also used or modified across function calls:
REGISTER CALL RETURN REGISTER CALL RETURN
=============== =============================== =============================== =============== =============================== ======================
GR0 Fixed Zero - GR0 Fixed Zero -
GR2 Function call frame pointer GR2 Function call frame pointer
GR3 Special Preserved GR3 Special Preserved
GR3-GR7 - Clobbered GR3-GR7 - Clobbered
GR8 Function call arg #1 Return value (or clobbered) GR8 Function call arg #1 Return value
GR9 Function call arg #2 Return value MSW (or clobbered) (or clobbered)
GR9 Function call arg #2 Return value MSW
(or clobbered)
GR10-GR13 Function call arg #3-#6 Clobbered GR10-GR13 Function call arg #3-#6 Clobbered
GR14 - Clobbered GR14 - Clobbered
GR15-GR16 Special Preserved GR15-GR16 Special Preserved
GR17-GR27 - Preserved GR17-GR27 - Preserved
GR28-GR31 Special Only accessed explicitly GR28-GR31 Special Only accessed
explicitly
LR Return address after CALL Clobbered LR Return address after CALL Clobbered
CCR/CCCR - Mostly Clobbered CCR/CCCR - Mostly Clobbered
@@ -124,46 +136,53 @@ Certain registers are also used or modified across function calls:
INTERNAL DEBUG-MODE REGISTER ABI INTERNAL DEBUG-MODE REGISTER ABI
================================ ================================
This is the same as the kernel-mode register ABI for functions calls. The difference is that in This is the same as the kernel-mode register ABI for functions calls. The
debug-mode there's a different stack and a different exception frame. Almost all the global difference is that in debug-mode there's a different stack and a different
registers from kernel-mode (including the stack pointer) may be changed. exception frame. Almost all the global registers from kernel-mode
(including the stack pointer) may be changed.
REGISTER FLAVOUR USE REGISTER FLAVOUR USE
=============== ======= ==================================================== =============== ======= ==============================================
GR1 Debug stack pointer GR1 Debug stack pointer
GR16 GP-Rel base register for small data GR16 GP-Rel base register for small data
GR31 Current debug exception frame pointer (__debug_frame) GR31 Current debug exception frame pointer
(__debug_frame)
SCR3 MMU Saved value of GR31 SCR3 MMU Saved value of GR31
Note that debug mode is able to interfere with the kernel's emulated atomic ops, so it must be Note that debug mode is able to interfere with the kernel's emulated atomic
exceedingly careful not to do any that would interact with the main kernel in this regard. Hence ops, so it must be exceedingly careful not to do any that would interact
the debug mode code (gdbstub) is almost completely self-contained. The only external code used is with the main kernel in this regard. Hence the debug mode code (gdbstub) is
the sprintf family of functions. almost completely self-contained. The only external code used is the
sprintf family of functions.
Futhermore, break.S is so complicated because single-step mode does not switch off on entry to an Futhermore, break.S is so complicated because single-step mode does not
exception. That means unless manually disabled, single-stepping will blithely go on stepping into switch off on entry to an exception. That means unless manually disabled,
things like interrupts. See gdbstub.txt for more information. single-stepping will blithely go on stepping into things like interrupts.
See gdbstub.txt for more information.
========================== ==========================
VIRTUAL INTERRUPT HANDLING VIRTUAL INTERRUPT HANDLING
========================== ==========================
Because accesses to the PSR is so slow, and to disable interrupts we have to access it twice (once Because accesses to the PSR is so slow, and to disable interrupts we have
to read and once to write), we don't actually disable interrupts at all if we don't have to. What to access it twice (once to read and once to write), we don't actually
we do instead is use the ICC2 condition code flags to note virtual disablement, such that if we disable interrupts at all if we don't have to. What we do instead is use
then do take an interrupt, we note the flag, really disable interrupts, set another flag and resume the ICC2 condition code flags to note virtual disablement, such that if we
execution at the point the interrupt happened. Setting condition flags as a side effect of an then do take an interrupt, we note the flag, really disable interrupts, set
arithmetic or logical instruction is really fast. This use of the ICC2 only occurs within the another flag and resume execution at the point the interrupt happened.
Setting condition flags as a side effect of an arithmetic or logical
instruction is really fast. This use of the ICC2 only occurs within the
kernel - it does not affect userspace. kernel - it does not affect userspace.
The flags we use are: The flags we use are:
(*) CCR.ICC2.Z [Zero flag] (*) CCR.ICC2.Z [Zero flag]
Set to virtually disable interrupts, clear when interrupts are virtually enabled. Can be Set to virtually disable interrupts, clear when interrupts are
modified by logical instructions without affecting the Carry flag. virtually enabled. Can be modified by logical instructions without
affecting the Carry flag.
(*) CCR.ICC2.C [Carry flag] (*) CCR.ICC2.C [Carry flag]
@@ -176,8 +195,9 @@ What happens is this:
ICC2.Z is 0, ICC2.C is 1. ICC2.Z is 0, ICC2.C is 1.
(2) An interrupt occurs. The exception prologue examines ICC2.Z and determines that nothing needs (2) An interrupt occurs. The exception prologue examines ICC2.Z and
doing. This is done simply with an unlikely BEQ instruction. determines that nothing needs doing. This is done simply with an
unlikely BEQ instruction.
(3) The interrupts are disabled (local_irq_disable) (3) The interrupts are disabled (local_irq_disable)
@@ -187,48 +207,56 @@ What happens is this:
ICC2.Z would be set to 0. ICC2.Z would be set to 0.
A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would be used to trap if A TIHI #2 instruction (trap #2 if condition HI - Z==0 && C==0) would
interrupts were now virtually enabled, but physically disabled - which they're not, so the be used to trap if interrupts were now virtually enabled, but
trap isn't taken. The kernel would then be back to state (1). physically disabled - which they're not, so the trap isn't taken. The
kernel would then be back to state (1).
(5) An interrupt occurs. The exception prologue examines ICC2.Z and determines that the interrupt (5) An interrupt occurs. The exception prologue examines ICC2.Z and
shouldn't actually have happened. It jumps aside, and there disabled interrupts by setting determines that the interrupt shouldn't actually have happened. It
PSR.PIL to 14 and then it clears ICC2.C. jumps aside, and there disabled interrupts by setting PSR.PIL to 14
and then it clears ICC2.C.
(6) If interrupts were then saved and disabled again (local_irq_save): (6) If interrupts were then saved and disabled again (local_irq_save):
ICC2.Z would be shifted into the save variable and masked off (giving a 1). ICC2.Z would be shifted into the save variable and masked off
(giving a 1).
ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be unaffected (ie: 0). ICC2.Z would then be set to 1 (thus unchanged), and ICC2.C would be
unaffected (ie: 0).
(7) If interrupts were then restored from state (6) (local_irq_restore): (7) If interrupts were then restored from state (6) (local_irq_restore):
ICC2.Z would be set to indicate the result of XOR'ing the saved value (ie: 1) with 1, which ICC2.Z would be set to indicate the result of XOR'ing the saved
gives a result of 0 - thus leaving ICC2.Z set. value (ie: 1) with 1, which gives a result of 0 - thus leaving
ICC2.Z set.
ICC2.C would remain unaffected (ie: 0). ICC2.C would remain unaffected (ie: 0).
A TIHI #2 instruction would be used to again assay the current state, but this would do A TIHI #2 instruction would be used to again assay the current state,
nothing as Z==1. but this would do nothing as Z==1.
(8) If interrupts were then enabled (local_irq_enable): (8) If interrupts were then enabled (local_irq_enable):
ICC2.Z would be cleared. ICC2.C would be left unaffected. Both flags would now be 0. ICC2.Z would be cleared. ICC2.C would be left unaffected. Both
flags would now be 0.
A TIHI #2 instruction again issued to assay the current state would then trap as both Z==0 A TIHI #2 instruction again issued to assay the current state would
[interrupts virtually enabled] and C==0 [interrupts really disabled] would then be true. then trap as both Z==0 [interrupts virtually enabled] and C==0
[interrupts really disabled] would then be true.
(9) The trap #2 handler would simply enable hardware interrupts (set PSR.PIL to 0), set ICC2.C to (9) The trap #2 handler would simply enable hardware interrupts
1 and return. (set PSR.PIL to 0), set ICC2.C to 1 and return.
(10) Immediately upon returning, the pending interrupt would be taken. (10) Immediately upon returning, the pending interrupt would be taken.
(11) The interrupt handler would take the path of actually processing the interrupt (ICC2.Z is (11) The interrupt handler would take the path of actually processing the
clear, BEQ fails as per step (2)). interrupt (ICC2.Z is clear, BEQ fails as per step (2)).
(12) The interrupt handler would then set ICC2.C to 1 since hardware interrupts are definitely (12) The interrupt handler would then set ICC2.C to 1 since hardware
enabled - or else the kernel wouldn't be here. interrupts are definitely enabled - or else the kernel wouldn't be here.
(13) On return from the interrupt handler, things would be back to state (1). (13) On return from the interrupt handler, things would be back to state (1).
This trap (#2) is only available in kernel mode. In user mode it will result in SIGILL. This trap (#2) is only available in kernel mode. In user mode it will
result in SIGILL.

View file

@@ -1,4 +1,4 @@
February 2003 Kernel Parameters v2.5.59 Kernel Parameters
~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~
The following is a consolidated list of the kernel parameters as implemented The following is a consolidated list of the kernel parameters as implemented
@@ -17,9 +17,17 @@ are specified on the kernel command line with the module name plus
usbcore.blinkenlights=1 usbcore.blinkenlights=1
The text in square brackets at the beginning of the description states the This document may not be entirely up to date and comprehensive. The command
restrictions on the kernel for the said kernel parameter to be valid. The "modinfo -p ${modulename}" shows a current list of all parameters of a loadable
restrictions referred to are that the relevant option is valid if: module. Loadable modules, after being loaded into the running kernel, also
reveal their parameters in /sys/module/${modulename}/parameters/. Some of these
parameters may be changed at runtime by the command
"echo -n ${value} > /sys/module/${modulename}/parameters/${parm}".
The parameters listed below are only valid if certain kernel build options were
enabled and if respective hardware is present. The text in square brackets at
the beginning of each description states the restrictions within which a
parameter is applicable:
ACPI ACPI support is enabled. ACPI ACPI support is enabled.
ALSA ALSA sound support is enabled. ALSA ALSA sound support is enabled.
@@ -1046,10 +1054,10 @@ running once the system is up.
noltlbs [PPC] Do not use large page/tlb entries for kernel noltlbs [PPC] Do not use large page/tlb entries for kernel
lowmem mapping on PPC40x. lowmem mapping on PPC40x.
nomce [IA-32] Machine Check Exception
nomca [IA-64] Disable machine check abort handling nomca [IA-64] Disable machine check abort handling
nomce [IA-32] Machine Check Exception
noresidual [PPC] Don't use residual data on PReP machines. noresidual [PPC] Don't use residual data on PReP machines.
noresume [SWSUSP] Disables resume and restores original swap noresume [SWSUSP] Disables resume and restores original swap
@@ -1682,20 +1690,6 @@ running once the system is up.
______________________________________________________________________ ______________________________________________________________________
Changelog:
2000-06-?? Mr. Unknown
The last known update (for 2.4.0) - the changelog was not kept before.
2002-11-24 Petr Baudis <pasky@ucw.cz>
Randy Dunlap <randy.dunlap@verizon.net>
Update for 2.5.49, description for most of the options introduced,
references to other documentation (C files, READMEs, ..), added S390,
PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and
reformatting.
2005-10-19 Randy Dunlap <rdunlap@xenotime.net>
Lots of typos, whitespace, some reformatting.
TODO: TODO:

View file

@@ -254,7 +254,7 @@ and, the number of frames be
<block number> * <block size> / <frame size> <block number> * <block size> / <frame size>
Suposse the following parameters, which apply for 2.6 kernel and an Suppose the following parameters, which apply for 2.6 kernel and an
i386 architecture: i386 architecture:
<size-max> = 131072 bytes <size-max> = 131072 bytes

View file

@@ -138,7 +138,7 @@ This means that you have to read/write IP packets when you are using tun and
ethernet frames when using tap. ethernet frames when using tap.
5. What is the difference between BPF and TUN/TAP driver? 5. What is the difference between BPF and TUN/TAP driver?
BFP is an advanced packet filter. It can be attached to existing BPF is an advanced packet filter. It can be attached to existing
network interface. It does not provide a virtual network interface. network interface. It does not provide a virtual network interface.
A TUN/TAP driver does provide a virtual network interface and it is possible A TUN/TAP driver does provide a virtual network interface and it is possible
to attach BPF to this interface. to attach BPF to this interface.

View file

@@ -69,7 +69,7 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
* for the data I pass, and I need tags * for the data I pass, and I need tags
* on the data to indicate what information I have * on the data to indicate what information I have
* squirrelled away. ELF notes happen to provide * squirrelled away. ELF notes happen to provide
* all of that that no need to invent something new. * all of that, so there is no need to invent something new.
*/ */
buf = (u32*)per_cpu_ptr(crash_notes, cpu); buf = (u32*)per_cpu_ptr(crash_notes, cpu);
if (!buf) if (!buf)

View file

@@ -1740,7 +1740,7 @@ EXPORT_SYMBOL(blk_run_queue);
/** /**
* blk_cleanup_queue: - release a &request_queue_t when it is no longer needed * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
* @q: the request queue to be released * @kobj: the kobj belonging of the request queue to be released
* *
* Description: * Description:
* blk_cleanup_queue is the pair to blk_init_queue() or * blk_cleanup_queue is the pair to blk_init_queue() or

View file

@@ -78,8 +78,7 @@ void dm_put_target_type(struct target_type *t)
if (--ti->use == 0) if (--ti->use == 0)
module_put(ti->tt.module); module_put(ti->tt.module);
if (ti->use < 0) BUG_ON(ti->use < 0);
BUG();
up_read(&_lock); up_read(&_lock);
return; return;

View file

@@ -1558,8 +1558,7 @@ static int init_resync(conf_t *conf)
int buffs; int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
if (conf->r1buf_pool) BUG_ON(conf->r1buf_pool);
BUG();
conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free, conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
conf->poolinfo); conf->poolinfo);
if (!conf->r1buf_pool) if (!conf->r1buf_pool)
@@ -1732,8 +1731,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
!conf->fullsync && !conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break; break;
if (sync_blocks < (PAGE_SIZE>>9)) BUG_ON(sync_blocks < (PAGE_SIZE>>9));
BUG();
if (len > (sync_blocks<<9)) if (len > (sync_blocks<<9))
len = sync_blocks<<9; len = sync_blocks<<9;
} }

View file

@@ -1117,8 +1117,7 @@ static int end_sync_read(struct bio *bio, unsigned int bytes_done, int error)
for (i=0; i<conf->copies; i++) for (i=0; i<conf->copies; i++)
if (r10_bio->devs[i].bio == bio) if (r10_bio->devs[i].bio == bio)
break; break;
if (i == conf->copies) BUG_ON(i == conf->copies);
BUG();
update_head_pos(i, r10_bio); update_head_pos(i, r10_bio);
d = r10_bio->devs[i].devnum; d = r10_bio->devs[i].devnum;
@@ -1518,8 +1517,7 @@ static int init_resync(conf_t *conf)
int buffs; int buffs;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE; buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
if (conf->r10buf_pool) BUG_ON(conf->r10buf_pool);
BUG();
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf); conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
if (!conf->r10buf_pool) if (!conf->r10buf_pool)
return -ENOMEM; return -ENOMEM;

View file

@@ -73,10 +73,8 @@ static void print_raid5_conf (raid5_conf_t *conf);
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{ {
if (atomic_dec_and_test(&sh->count)) { if (atomic_dec_and_test(&sh->count)) {
if (!list_empty(&sh->lru)) BUG_ON(!list_empty(&sh->lru));
BUG(); BUG_ON(atomic_read(&conf->active_stripes)==0);
if (atomic_read(&conf->active_stripes)==0)
BUG();
if (test_bit(STRIPE_HANDLE, &sh->state)) { if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state)) if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list); list_add_tail(&sh->lru, &conf->delayed_list);
@@ -184,10 +182,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int
raid5_conf_t *conf = sh->raid_conf; raid5_conf_t *conf = sh->raid_conf;
int i; int i;
if (atomic_read(&sh->count) != 0) BUG_ON(atomic_read(&sh->count) != 0);
BUG(); BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
if (test_bit(STRIPE_HANDLE, &sh->state))
BUG();
CHECK_DEVLOCK(); CHECK_DEVLOCK();
PRINTK("init_stripe called, stripe %llu\n", PRINTK("init_stripe called, stripe %llu\n",
@@ -269,8 +265,7 @@ static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector
init_stripe(sh, sector, pd_idx, disks); init_stripe(sh, sector, pd_idx, disks);
} else { } else {
if (atomic_read(&sh->count)) { if (atomic_read(&sh->count)) {
if (!list_empty(&sh->lru)) BUG_ON(!list_empty(&sh->lru));
BUG();
} else { } else {
if (!test_bit(STRIPE_HANDLE, &sh->state)) if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes); atomic_inc(&conf->active_stripes);
@@ -465,8 +460,7 @@ static int drop_one_stripe(raid5_conf_t *conf)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
if (!sh) if (!sh)
return 0; return 0;
if (atomic_read(&sh->count)) BUG_ON(atomic_read(&sh->count));
BUG();
shrink_buffers(sh, conf->pool_size); shrink_buffers(sh, conf->pool_size);
kmem_cache_free(conf->slab_cache, sh); kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes); atomic_dec(&conf->active_stripes);
@@ -882,8 +876,7 @@ static void compute_parity(struct stripe_head *sh, int method)
ptr[0] = page_address(sh->dev[pd_idx].page); ptr[0] = page_address(sh->dev[pd_idx].page);
switch(method) { switch(method) {
case READ_MODIFY_WRITE: case READ_MODIFY_WRITE:
if (!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags)) BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags));
BUG();
for (i=disks ; i-- ;) { for (i=disks ; i-- ;) {
if (i==pd_idx) if (i==pd_idx)
continue; continue;
@@ -896,7 +889,7 @@ static void compute_parity(struct stripe_head *sh, int method)
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap); wake_up(&conf->wait_for_overlap);
if (sh->dev[i].written) BUG(); BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen; sh->dev[i].written = chosen;
check_xor(); check_xor();
} }
@@ -912,7 +905,7 @@ static void compute_parity(struct stripe_head *sh, int method)
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap); wake_up(&conf->wait_for_overlap);
if (sh->dev[i].written) BUG(); BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen; sh->dev[i].written = chosen;
} }
break; break;
@@ -995,8 +988,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
goto overlap; goto overlap;
if (*bip && bi->bi_next && (*bip) != bi->bi_next) BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
BUG();
if (*bip) if (*bip)
bi->bi_next = *bip; bi->bi_next = *bip;
*bip = bi; *bip = bi;
@@ -1430,8 +1422,7 @@ static void handle_stripe(struct stripe_head *sh)
set_bit(STRIPE_HANDLE, &sh->state); set_bit(STRIPE_HANDLE, &sh->state);
if (failed == 0) { if (failed == 0) {
char *pagea; char *pagea;
if (uptodate != disks) BUG_ON(uptodate != disks);
BUG();
compute_parity(sh, CHECK_PARITY); compute_parity(sh, CHECK_PARITY);
uptodate--; uptodate--;
pagea = page_address(sh->dev[sh->pd_idx].page); pagea = page_address(sh->dev[sh->pd_idx].page);
@@ -2096,8 +2087,7 @@ static void raid5d (mddev_t *mddev)
list_del_init(first); list_del_init(first);
atomic_inc(&sh->count); atomic_inc(&sh->count);
if (atomic_read(&sh->count)!= 1) BUG_ON(atomic_read(&sh->count)!= 1);
BUG();
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
handled++; handled++;

View file

@@ -91,10 +91,8 @@ static void print_raid6_conf (raid6_conf_t *conf);
static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh) static void __release_stripe(raid6_conf_t *conf, struct stripe_head *sh)
{ {
if (atomic_dec_and_test(&sh->count)) { if (atomic_dec_and_test(&sh->count)) {
if (!list_empty(&sh->lru)) BUG_ON(!list_empty(&sh->lru));
BUG(); BUG_ON(atomic_read(&conf->active_stripes)==0);
if (atomic_read(&conf->active_stripes)==0)
BUG();
if (test_bit(STRIPE_HANDLE, &sh->state)) { if (test_bit(STRIPE_HANDLE, &sh->state)) {
if (test_bit(STRIPE_DELAYED, &sh->state)) if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list); list_add_tail(&sh->lru, &conf->delayed_list);
@@ -202,10 +200,8 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx)
raid6_conf_t *conf = sh->raid_conf; raid6_conf_t *conf = sh->raid_conf;
int disks = conf->raid_disks, i; int disks = conf->raid_disks, i;
if (atomic_read(&sh->count) != 0) BUG_ON(atomic_read(&sh->count) != 0);
BUG(); BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
if (test_bit(STRIPE_HANDLE, &sh->state))
BUG();
CHECK_DEVLOCK(); CHECK_DEVLOCK();
PRINTK("init_stripe called, stripe %llu\n", PRINTK("init_stripe called, stripe %llu\n",
@ -284,13 +280,11 @@ static struct stripe_head *get_active_stripe(raid6_conf_t *conf, sector_t sector
init_stripe(sh, sector, pd_idx); init_stripe(sh, sector, pd_idx);
} else { } else {
if (atomic_read(&sh->count)) { if (atomic_read(&sh->count)) {
if (!list_empty(&sh->lru)) BUG_ON(!list_empty(&sh->lru));
BUG();
} else { } else {
if (!test_bit(STRIPE_HANDLE, &sh->state)) if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes); atomic_inc(&conf->active_stripes);
if (list_empty(&sh->lru)) BUG_ON(list_empty(&sh->lru));
BUG();
list_del_init(&sh->lru); list_del_init(&sh->lru);
} }
} }
@ -353,8 +347,7 @@ static int drop_one_stripe(raid6_conf_t *conf)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
if (!sh) if (!sh)
return 0; return 0;
if (atomic_read(&sh->count)) BUG_ON(atomic_read(&sh->count));
BUG();
shrink_buffers(sh, conf->raid_disks); shrink_buffers(sh, conf->raid_disks);
kmem_cache_free(conf->slab_cache, sh); kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes); atomic_dec(&conf->active_stripes);
@ -780,7 +773,7 @@ static void compute_parity(struct stripe_head *sh, int method)
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap); wake_up(&conf->wait_for_overlap);
if (sh->dev[i].written) BUG(); BUG_ON(sh->dev[i].written);
sh->dev[i].written = chosen; sh->dev[i].written = chosen;
} }
break; break;
@ -970,8 +963,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
goto overlap; goto overlap;
if (*bip && bi->bi_next && (*bip) != bi->bi_next) BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
BUG();
if (*bip) if (*bip)
bi->bi_next = *bip; bi->bi_next = *bip;
*bip = bi; *bip = bi;
@ -1906,8 +1898,7 @@ static void raid6d (mddev_t *mddev)
list_del_init(first); list_del_init(first);
atomic_inc(&sh->count); atomic_inc(&sh->count);
if (atomic_read(&sh->count)!= 1) BUG_ON(atomic_read(&sh->count)!= 1);
BUG();
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
handled++; handled++;

View file

@ -200,27 +200,6 @@ config MTD_CFI_AMDSTD
provides support for one of those command sets, used on chips provides support for one of those command sets, used on chips
including the AMD Am29LV320. including the AMD Am29LV320.
config MTD_CFI_AMDSTD_RETRY
int "Retry failed commands (erase/program)"
depends on MTD_CFI_AMDSTD
default "0"
help
Some chips, when attached to a shared bus, don't properly filter
bus traffic that is destined to other devices. This broken
behavior causes erase and program sequences to be aborted when
the sequences are mixed with traffic for other devices.
SST49LF040 (and related) chips are know to be broken.
config MTD_CFI_AMDSTD_RETRY_MAX
int "Max retries of failed commands (erase/program)"
depends on MTD_CFI_AMDSTD_RETRY
default "0"
help
If you have an SST49LF040 (or related chip) then this value should
be set to at least 1. This can also be adjusted at driver load
time with the retry_cmd_max module parameter.
config MTD_CFI_STAA config MTD_CFI_STAA
tristate "Support for ST (Advanced Architecture) flash chips" tristate "Support for ST (Advanced Architecture) flash chips"
depends on MTD_GEN_PROBE depends on MTD_GEN_PROBE

View file

@ -539,8 +539,7 @@ rx_status_loop:
unsigned buflen; unsigned buflen;
skb = cp->rx_skb[rx_tail].skb; skb = cp->rx_skb[rx_tail].skb;
if (!skb) BUG_ON(!skb);
BUG();
desc = &cp->rx_ring[rx_tail]; desc = &cp->rx_ring[rx_tail];
status = le32_to_cpu(desc->opts1); status = le32_to_cpu(desc->opts1);
@ -723,8 +722,7 @@ static void cp_tx (struct cp_private *cp)
break; break;
skb = cp->tx_skb[tx_tail].skb; skb = cp->tx_skb[tx_tail].skb;
if (!skb) BUG_ON(!skb);
BUG();
pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping, pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE); cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
@ -1550,8 +1548,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort); tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun); tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
tmp_stats[i++] = cp->cp_stats.rx_frags; tmp_stats[i++] = cp->cp_stats.rx_frags;
if (i != CP_NUM_STATS) BUG_ON(i != CP_NUM_STATS);
BUG();
pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma); pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
} }
@ -1856,8 +1853,7 @@ static void cp_remove_one (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct cp_private *cp = netdev_priv(dev); struct cp_private *cp = netdev_priv(dev);
if (!dev) BUG_ON(!dev);
BUG();
unregister_netdev(dev); unregister_netdev(dev);
iounmap(cp->regs); iounmap(cp->regs);
if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0); if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);

View file

@ -765,8 +765,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
BUGMSG(D_DURING, "in arcnet_interrupt\n"); BUGMSG(D_DURING, "in arcnet_interrupt\n");
lp = dev->priv; lp = dev->priv;
if (!lp) BUG_ON(!lp);
BUG();
spin_lock(&lp->lock); spin_lock(&lp->lock);

View file

@ -608,8 +608,7 @@ static void b44_tx(struct b44 *bp)
struct ring_info *rp = &bp->tx_buffers[cons]; struct ring_info *rp = &bp->tx_buffers[cons];
struct sk_buff *skb = rp->skb; struct sk_buff *skb = rp->skb;
if (unlikely(skb == NULL)) BUG_ON(skb == NULL);
BUG();
pci_unmap_single(bp->pdev, pci_unmap_single(bp->pdev,
pci_unmap_addr(rp, mapping), pci_unmap_addr(rp, mapping),

View file

@ -1093,8 +1093,7 @@ static int process_responses(struct adapter *adapter, int budget)
if (likely(e->DataValid)) { if (likely(e->DataValid)) {
struct freelQ *fl = &sge->freelQ[e->FreelistQid]; struct freelQ *fl = &sge->freelQ[e->FreelistQid];
if (unlikely(!e->Sop || !e->Eop)) BUG_ON(!e->Sop || !e->Eop);
BUG();
if (unlikely(e->Offload)) if (unlikely(e->Offload))
unexpected_offload(adapter, fl); unexpected_offload(adapter, fl);
else else

View file

@ -3308,8 +3308,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
while (poll_dev != &adapter->polling_netdev[i]) { while (poll_dev != &adapter->polling_netdev[i]) {
i++; i++;
if (unlikely(i == adapter->num_rx_queues)) BUG_ON(i == adapter->num_rx_queues);
BUG();
} }
if (likely(adapter->num_tx_queues == 1)) { if (likely(adapter->num_tx_queues == 1)) {

View file

@ -203,8 +203,7 @@ static int eql_open(struct net_device *dev)
printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
"your slave devices.\n", dev->name); "your slave devices.\n", dev->name);
if (!list_empty(&eql->queue.all_slaves)) BUG_ON(!list_empty(&eql->queue.all_slaves));
BUG();
eql->min_slaves = 1; eql->min_slaves = 1;
eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */ eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

View file

@ -695,8 +695,7 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
/* /*
* We must not be transmitting... * We must not be transmitting...
*/ */
if (si->txskb) BUG_ON(si->txskb);
BUG();
netif_stop_queue(dev); netif_stop_queue(dev);

View file

@ -645,9 +645,7 @@ static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
{ {
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
if (!dev) BUG_ON(!dev);
BUG();
unregister_netdev(dev); unregister_netdev(dev);
release_region(dev->base_addr, NE_IO_EXTENT); release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev); free_netdev(dev);

View file

@ -568,8 +568,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
#endif #endif
sg = dev->rx_info.descs + (next_empty * DESC_SIZE); sg = dev->rx_info.descs + (next_empty * DESC_SIZE);
if (unlikely(NULL != dev->rx_info.skbs[next_empty])) BUG_ON(NULL != dev->rx_info.skbs[next_empty]);
BUG();
dev->rx_info.skbs[next_empty] = skb; dev->rx_info.skbs[next_empty] = skb;
dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC; dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;

View file

@ -2122,8 +2122,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct netdev_private *np = netdev_priv(dev); struct netdev_private *np = netdev_priv(dev);
if (!dev) BUG_ON(!dev);
BUG();
unregister_netdev(dev); unregister_netdev(dev);

View file

@ -2959,9 +2959,7 @@ static void tg3_tx(struct tg3 *tp)
struct sk_buff *skb = ri->skb; struct sk_buff *skb = ri->skb;
int i; int i;
if (unlikely(skb == NULL)) BUG_ON(skb == NULL);
BUG();
pci_unmap_single(tp->pdev, pci_unmap_single(tp->pdev,
pci_unmap_addr(ri, mapping), pci_unmap_addr(ri, mapping),
skb_headlen(skb), skb_headlen(skb),
@ -2972,12 +2970,10 @@ static void tg3_tx(struct tg3 *tp)
sw_idx = NEXT_TX(sw_idx); sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
if (unlikely(sw_idx == hw_idx)) BUG_ON(sw_idx == hw_idx);
BUG();
ri = &tp->tx_buffers[sw_idx]; ri = &tp->tx_buffers[sw_idx];
if (unlikely(ri->skb != NULL)) BUG_ON(ri->skb != NULL);
BUG();
pci_unmap_page(tp->pdev, pci_unmap_page(tp->pdev,
pci_unmap_addr(ri, mapping), pci_unmap_addr(ri, mapping),
@ -4928,9 +4924,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{ {
int i; int i;
if (offset == TX_CPU_BASE && BUG_ON(offset == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
BUG();
if (offset == RX_CPU_BASE) { if (offset == RX_CPU_BASE) {
for (i = 0; i < 10000; i++) { for (i = 0; i < 10000; i++) {

View file

@ -438,8 +438,7 @@ static void __devexit abyss_detach (struct pci_dev *pdev)
{ {
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
if (!dev) BUG_ON(!dev);
BUG();
unregister_netdev(dev); unregister_netdev(dev);
release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT); release_region(dev->base_addr-0x10, ABYSS_IO_EXTENT);
free_irq(dev->irq, dev); free_irq(dev->irq, dev);

View file

@ -735,8 +735,7 @@ static int __devexit madgemc_remove(struct device *device)
struct net_local *tp; struct net_local *tp;
struct card_info *card; struct card_info *card;
if (!dev) BUG_ON(!dev);
BUG();
tp = dev->priv; tp = dev->priv;
card = tp->tmspriv; card = tp->tmspriv;

View file

@ -5573,8 +5573,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
case IEEE80211_52GHZ_BAND: case IEEE80211_52GHZ_BAND:
network->mode = IEEE_A; network->mode = IEEE_A;
i = ieee80211_channel_to_index(priv->ieee, priv->channel); i = ieee80211_channel_to_index(priv->ieee, priv->channel);
if (i == -1) BUG_ON(i == -1);
BUG();
if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) { if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
IPW_WARNING("Overriding invalid channel\n"); IPW_WARNING("Overriding invalid channel\n");
priv->channel = geo->a[0].channel; priv->channel = geo->a[0].channel;
@ -5587,8 +5586,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
else else
network->mode = IEEE_B; network->mode = IEEE_B;
i = ieee80211_channel_to_index(priv->ieee, priv->channel); i = ieee80211_channel_to_index(priv->ieee, priv->channel);
if (i == -1) BUG_ON(i == -1);
BUG();
if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) { if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
IPW_WARNING("Overriding invalid channel\n"); IPW_WARNING("Overriding invalid channel\n");
priv->channel = geo->bg[0].channel; priv->channel = geo->bg[0].channel;
@ -6715,8 +6713,7 @@ static int ipw_qos_association(struct ipw_priv *priv,
switch (priv->ieee->iw_mode) { switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC: case IW_MODE_ADHOC:
if (!(network->capability & WLAN_CAPABILITY_IBSS)) BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
BUG();
qos_data = &ibss_data; qos_data = &ibss_data;
break; break;

View file

@ -1441,8 +1441,7 @@ static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev); struct net_device *dev = pci_get_drvdata(pdev);
struct yellowfin_private *np; struct yellowfin_private *np;
if (!dev) BUG_ON(!dev);
BUG();
np = netdev_priv(dev); np = netdev_priv(dev);
pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,

View file

@ -32,9 +32,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
int size; int size;
/* Sanity checks */ /* Sanity checks */
if ( magic == NULL || datasize > PAGE_SIZE || BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE) (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
BUG();
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0) if (cplength > 0)
@ -125,8 +124,7 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
struct dasd_device *device; struct dasd_device *device;
int success; int success;
if (cqr->refers == NULL || cqr->function == NULL) BUG_ON(cqr->refers == NULL || cqr->function == NULL);
BUG();
device = cqr->device; device = cqr->device;
success = cqr->status == DASD_CQR_DONE; success = cqr->status == DASD_CQR_DONE;

View file

@ -24,7 +24,7 @@
/* /*
* The room for the SCCB (only for writing) is not equal to a pages size * The room for the SCCB (only for writing) is not equal to a pages size
* (as it is specified as the maximum size in the the SCLP ducumentation) * (as it is specified as the maximum size in the the SCLP documentation)
* because of the additional data structure described above. * because of the additional data structure described above.
*/ */
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))

View file

@ -198,9 +198,7 @@ tapeblock_request_fn(request_queue_t *queue)
device = (struct tape_device *) queue->queuedata; device = (struct tape_device *) queue->queuedata;
DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device); DBF_LH(6, "tapeblock_request_fn(device=%p)\n", device);
if (device == NULL) BUG_ON(device == NULL);
BUG();
tapeblock_trigger_requeue(device); tapeblock_trigger_requeue(device);
} }
@ -307,8 +305,7 @@ tapeblock_revalidate_disk(struct gendisk *disk)
int rc; int rc;
device = (struct tape_device *) disk->private_data; device = (struct tape_device *) disk->private_data;
if (!device) BUG_ON(!device);
BUG();
if (!device->blk_data.medium_changed) if (!device->blk_data.medium_changed)
return 0; return 0;
@ -440,11 +437,9 @@ tapeblock_ioctl(
rc = 0; rc = 0;
disk = inode->i_bdev->bd_disk; disk = inode->i_bdev->bd_disk;
if (!disk) BUG_ON(!disk);
BUG();
device = disk->private_data; device = disk->private_data;
if (!device) BUG_ON(!device);
BUG();
minor = iminor(inode); minor = iminor(inode);
DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command); DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);

View file

@ -675,9 +675,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, rc; int index, rc;
LCS_DBF_TEXT(5, trace, "rdybuff"); LCS_DBF_TEXT(5, trace, "rdybuff");
if (buffer->state != BUF_STATE_LOCKED && BUG_ON(buffer->state != BUF_STATE_LOCKED &&
buffer->state != BUF_STATE_PROCESSED) buffer->state != BUF_STATE_PROCESSED);
BUG();
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = BUF_STATE_READY; buffer->state = BUF_STATE_READY;
index = buffer - channel->iob; index = buffer - channel->iob;
@ -701,8 +700,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
int index, prev, next; int index, prev, next;
LCS_DBF_TEXT(5, trace, "prcsbuff"); LCS_DBF_TEXT(5, trace, "prcsbuff");
if (buffer->state != BUF_STATE_READY) BUG_ON(buffer->state != BUF_STATE_READY);
BUG();
buffer->state = BUF_STATE_PROCESSED; buffer->state = BUF_STATE_PROCESSED;
index = buffer - channel->iob; index = buffer - channel->iob;
prev = (index - 1) & (LCS_NUM_BUFFS - 1); prev = (index - 1) & (LCS_NUM_BUFFS - 1);
@ -734,9 +732,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
unsigned long flags; unsigned long flags;
LCS_DBF_TEXT(5, trace, "relbuff"); LCS_DBF_TEXT(5, trace, "relbuff");
if (buffer->state != BUF_STATE_LOCKED && BUG_ON(buffer->state != BUF_STATE_LOCKED &&
buffer->state != BUF_STATE_PROCESSED) buffer->state != BUF_STATE_PROCESSED);
BUG();
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
buffer->state = BUF_STATE_EMPTY; buffer->state = BUF_STATE_EMPTY;
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

View file

@ -86,7 +86,7 @@ config AIC7XXX_DEBUG_MASK
default "0" default "0"
help help
Bit mask of debug options that is only valid if the Bit mask of debug options that is only valid if the
CONFIG_AIC7XXX_DEBUG_ENBLE option is enabled. The bits in this mask CONFIG_AIC7XXX_DEBUG_ENABLE option is enabled. The bits in this mask
are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the are defined in the drivers/scsi/aic7xxx/aic7xxx.h - search for the
variable ahc_debug in that file to find them. variable ahc_debug in that file to find them.

View file

@ -20,7 +20,7 @@
* *
* Contact Information: * Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com> * Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> * Wendy Xiong <wendyx@us.ibm.com>
* *
***********************************************************************/ ***********************************************************************/

View file

@ -20,7 +20,7 @@
* *
* Contact Information: * Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com> * Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> * Wendy Xiong <wendyx@us.ibm.com>
* *
* *
***********************************************************************/ ***********************************************************************/

View file

@ -20,7 +20,7 @@
* *
* Contact Information: * Contact Information:
* Scott H Kilau <Scott_Kilau@digi.com> * Scott H Kilau <Scott_Kilau@digi.com>
* Wendy Xiong <wendyx@us.ltcfwd.linux.ibm.com> * Wendy Xiong <wendyx@us.ibm.com>
* *
***********************************************************************/ ***********************************************************************/
#include <linux/delay.h> /* For udelay */ #include <linux/delay.h> /* For udelay */

View file

@ -929,8 +929,7 @@ do_holes:
block_in_page += this_chunk_blocks; block_in_page += this_chunk_blocks;
dio->blocks_available -= this_chunk_blocks; dio->blocks_available -= this_chunk_blocks;
next_block: next_block:
if (dio->block_in_file > dio->final_block_in_request) BUG_ON(dio->block_in_file > dio->final_block_in_request);
BUG();
if (dio->block_in_file == dio->final_block_in_request) if (dio->block_in_file == dio->final_block_in_request)
break; break;
} }

View file

@ -590,8 +590,7 @@ we_slept:
atomic_dec(&dquot->dq_count); atomic_dec(&dquot->dq_count);
#ifdef __DQUOT_PARANOIA #ifdef __DQUOT_PARANOIA
/* sanity check */ /* sanity check */
if (!list_empty(&dquot->dq_free)) BUG_ON(!list_empty(&dquot->dq_free));
BUG();
#endif #endif
put_dquot_last(dquot); put_dquot_last(dquot);
spin_unlock(&dq_list_lock); spin_unlock(&dq_list_lock);
@ -666,8 +665,7 @@ we_slept:
return NODQUOT; return NODQUOT;
} }
#ifdef __DQUOT_PARANOIA #ifdef __DQUOT_PARANOIA
if (!dquot->dq_sb) /* Has somebody invalidated entry under us? */ BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
BUG();
#endif #endif
return dquot; return dquot;

View file

@ -561,7 +561,7 @@ static int exec_mmap(struct mm_struct *mm)
arch_pick_mmap_layout(mm); arch_pick_mmap_layout(mm);
if (old_mm) { if (old_mm) {
up_read(&old_mm->mmap_sem); up_read(&old_mm->mmap_sem);
if (active_mm != old_mm) BUG(); BUG_ON(active_mm != old_mm);
mmput(old_mm); mmput(old_mm);
return 0; return 0;
} }

View file

@ -453,8 +453,7 @@ static void send_sigio_to_task(struct task_struct *p,
/* Make sure we are called with one of the POLL_* /* Make sure we are called with one of the POLL_*
reasons, otherwise we could leak kernel stack into reasons, otherwise we could leak kernel stack into
userspace. */ userspace. */
if ((reason & __SI_MASK) != __SI_POLL) BUG_ON((reason & __SI_MASK) != __SI_POLL);
BUG();
if (reason - POLL_IN >= NSIGPOLL) if (reason - POLL_IN >= NSIGPOLL)
si.si_band = ~0L; si.si_band = ~0L;
else else

View file

@ -42,24 +42,21 @@
static inline void static inline void
vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp) vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
{ {
if (infp->vsi_fshino) BUG_ON(infp->vsi_fshino);
BUG();
infp->vsi_fshino = fshp->olt_fsino[0]; infp->vsi_fshino = fshp->olt_fsino[0];
} }
static inline void static inline void
vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp) vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
{ {
if (infp->vsi_iext) BUG_ON(infp->vsi_iext);
BUG();
infp->vsi_iext = ilistp->olt_iext[0]; infp->vsi_iext = ilistp->olt_iext[0];
} }
static inline u_long static inline u_long
vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize) vxfs_oblock(struct super_block *sbp, daddr_t block, u_long bsize)
{ {
if (sbp->s_blocksize % bsize) BUG_ON(sbp->s_blocksize % bsize);
BUG();
return (block * (sbp->s_blocksize / bsize)); return (block * (sbp->s_blocksize / bsize));
} }

View file

@ -466,8 +466,7 @@ void hfs_bnode_unhash(struct hfs_bnode *node)
for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)]; for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
*p && *p != node; p = &(*p)->next_hash) *p && *p != node; p = &(*p)->next_hash)
; ;
if (!*p) BUG_ON(!*p);
BUG();
*p = node->next_hash; *p = node->next_hash;
node->tree->node_hash_cnt--; node->tree->node_hash_cnt--;
} }
@ -622,8 +621,7 @@ void hfs_bnode_put(struct hfs_bnode *node)
dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n", dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
node->tree->cnid, node->this, atomic_read(&node->refcnt)); node->tree->cnid, node->this, atomic_read(&node->refcnt));
if (!atomic_read(&node->refcnt)) BUG_ON(!atomic_read(&node->refcnt));
BUG();
if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
return; return;
for (i = 0; i < tree->pages_per_bnode; i++) { for (i = 0; i < tree->pages_per_bnode; i++) {

View file

@ -269,8 +269,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
u8 *data, byte, m; u8 *data, byte, m;
dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this); dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
if (!node->this) BUG_ON(!node->this);
BUG();
tree = node->tree; tree = node->tree;
nidx = node->this; nidx = node->this;
node = hfs_bnode_find(tree, 0); node = hfs_bnode_find(tree, 0);

View file

@ -172,8 +172,7 @@ static struct inode *alloc_inode(struct super_block *sb)
void destroy_inode(struct inode *inode) void destroy_inode(struct inode *inode)
{ {
if (inode_has_buffers(inode)) BUG_ON(inode_has_buffers(inode));
BUG();
security_inode_free(inode); security_inode_free(inode);
if (inode->i_sb->s_op->destroy_inode) if (inode->i_sb->s_op->destroy_inode)
inode->i_sb->s_op->destroy_inode(inode); inode->i_sb->s_op->destroy_inode(inode);
@ -249,12 +248,9 @@ void clear_inode(struct inode *inode)
might_sleep(); might_sleep();
invalidate_inode_buffers(inode); invalidate_inode_buffers(inode);
if (inode->i_data.nrpages) BUG_ON(inode->i_data.nrpages);
BUG(); BUG_ON(!(inode->i_state & I_FREEING));
if (!(inode->i_state & I_FREEING)) BUG_ON(inode->i_state & I_CLEAR);
BUG();
if (inode->i_state & I_CLEAR)
BUG();
wait_on_inode(inode); wait_on_inode(inode);
DQUOT_DROP(inode); DQUOT_DROP(inode);
if (inode->i_sb && inode->i_sb->s_op->clear_inode) if (inode->i_sb && inode->i_sb->s_op->clear_inode)
@ -1054,8 +1050,7 @@ void generic_delete_inode(struct inode *inode)
hlist_del_init(&inode->i_hash); hlist_del_init(&inode->i_hash);
spin_unlock(&inode_lock); spin_unlock(&inode_lock);
wake_up_inode(inode); wake_up_inode(inode);
if (inode->i_state != I_CLEAR) BUG_ON(inode->i_state != I_CLEAR);
BUG();
destroy_inode(inode); destroy_inode(inode);
} }

View file

@ -35,8 +35,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
pid_t pid; pid_t pid;
int ret = 0; int ret = 0;
if (c->gc_task) BUG_ON(c->gc_task);
BUG();
init_completion(&c->gc_thread_start); init_completion(&c->gc_thread_start);
init_completion(&c->gc_thread_exit); init_completion(&c->gc_thread_exit);

View file

@ -178,11 +178,9 @@ smb_writepage(struct page *page, struct writeback_control *wbc)
unsigned offset = PAGE_CACHE_SIZE; unsigned offset = PAGE_CACHE_SIZE;
int err; int err;
if (!mapping) BUG_ON(!mapping);
BUG();
inode = mapping->host; inode = mapping->host;
if (!inode) BUG_ON(!inode);
BUG();
end_index = inode->i_size >> PAGE_CACHE_SHIFT; end_index = inode->i_size >> PAGE_CACHE_SHIFT;

View file

@ -50,7 +50,7 @@ static struct sysfs_dirent * sysfs_new_dirent(struct sysfs_dirent * parent_sd,
return sd; return sd;
} }
/** /*
* *
* Return -EEXIST if there is already a sysfs element with the same name for * Return -EEXIST if there is already a sysfs element with the same name for
* the same parent. * the same parent.

View file

@ -175,8 +175,7 @@ const unsigned char * sysfs_get_name(struct sysfs_dirent *sd)
struct bin_attribute * bin_attr; struct bin_attribute * bin_attr;
struct sysfs_symlink * sl; struct sysfs_symlink * sl;
if (!sd || !sd->s_element) BUG_ON(!sd || !sd->s_element);
BUG();
switch (sd->s_type) { switch (sd->s_type) {
case SYSFS_DIR: case SYSFS_DIR:

View file

@ -253,8 +253,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
lock_page(page); lock_page(page);
err = mapping->a_ops->prepare_write(NULL, page, from, to); err = mapping->a_ops->prepare_write(NULL, page, from, to);
if (err) BUG_ON(err);
BUG();
de->inode = 0; de->inode = 0;
err = dir_commit_chunk(page, from, to); err = dir_commit_chunk(page, from, to);
dir_put_page(page); dir_put_page(page);
@ -353,8 +352,7 @@ void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
lock_page(page); lock_page(page);
err = page->mapping->a_ops->prepare_write(NULL, page, from, to); err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
if (err) BUG_ON(err);
BUG();
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
err = dir_commit_chunk(page, from, to); err = dir_commit_chunk(page, from, to);
dir_put_page(page); dir_put_page(page);

View file

@ -312,12 +312,10 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
err = 0; err = 0;
bh = inode_getblk(inode, block, &err, &phys, &new); bh = inode_getblk(inode, block, &err, &phys, &new);
if (bh) BUG_ON(bh);
BUG();
if (err) if (err)
goto abort; goto abort;
if (!phys) BUG_ON(!phys);
BUG();
if (new) if (new)
set_buffer_new(bh_result); set_buffer_new(bh_result);

View file

@ -864,7 +864,7 @@ struct super_block {
*/ */
struct mutex s_vfs_rename_mutex; /* Kludge */ struct mutex s_vfs_rename_mutex; /* Kludge */
/* Granuality of c/m/atime in ns. /* Granularity of c/m/atime in ns.
Cannot be worse than a second */ Cannot be worse than a second */
u32 s_time_gran; u32 s_time_gran;
}; };

View file

@ -80,7 +80,7 @@ struct hrtimer_sleeper {
* @first: pointer to the timer node which expires first * @first: pointer to the timer node which expires first
* @resolution: the resolution of the clock, in nanoseconds * @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock * @get_time: function to retrieve the current time of the clock
* @get_sofirq_time: function to retrieve the current time from the softirq * @get_softirq_time: function to retrieve the current time from the softirq
* @curr_timer: the timer which is executing a callback right now * @curr_timer: the timer which is executing a callback right now
* @softirq_time: the time when running the hrtimer queue in the softirq * @softirq_time: the time when running the hrtimer queue in the softirq
*/ */

View file

@ -91,8 +91,8 @@ static inline int shm_addid(struct shmid_kernel *shp)
static inline void shm_inc (int id) { static inline void shm_inc (int id) {
struct shmid_kernel *shp; struct shmid_kernel *shp;
if(!(shp = shm_lock(id))) shp = shm_lock(id);
BUG(); BUG_ON(!shp);
shp->shm_atim = get_seconds(); shp->shm_atim = get_seconds();
shp->shm_lprid = current->tgid; shp->shm_lprid = current->tgid;
shp->shm_nattch++; shp->shm_nattch++;
@ -142,8 +142,8 @@ static void shm_close (struct vm_area_struct *shmd)
mutex_lock(&shm_ids.mutex); mutex_lock(&shm_ids.mutex);
/* remove from the list of attaches of the shm segment */ /* remove from the list of attaches of the shm segment */
if(!(shp = shm_lock(id))) shp = shm_lock(id);
BUG(); BUG_ON(!shp);
shp->shm_lprid = current->tgid; shp->shm_lprid = current->tgid;
shp->shm_dtim = get_seconds(); shp->shm_dtim = get_seconds();
shp->shm_nattch--; shp->shm_nattch--;
@ -283,8 +283,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
err = -EEXIST; err = -EEXIST;
} else { } else {
shp = shm_lock(id); shp = shm_lock(id);
if(shp==NULL) BUG_ON(shp==NULL);
BUG();
if (shp->shm_segsz < size) if (shp->shm_segsz < size)
err = -EINVAL; err = -EINVAL;
else if (ipcperms(&shp->shm_perm, shmflg)) else if (ipcperms(&shp->shm_perm, shmflg))
@ -774,8 +773,8 @@ invalid:
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
mutex_lock(&shm_ids.mutex); mutex_lock(&shm_ids.mutex);
if(!(shp = shm_lock(shmid))) shp = shm_lock(shmid);
BUG(); BUG_ON(!shp);
shp->shm_nattch--; shp->shm_nattch--;
if(shp->shm_nattch == 0 && if(shp->shm_nattch == 0 &&
shp->shm_perm.mode & SHM_DEST) shp->shm_perm.mode & SHM_DEST)

View file

@ -266,8 +266,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{ {
struct kern_ipc_perm* p; struct kern_ipc_perm* p;
int lid = id % SEQ_MULTIPLIER; int lid = id % SEQ_MULTIPLIER;
if(lid >= ids->entries->size) BUG_ON(lid >= ids->entries->size);
BUG();
/* /*
* do not need a rcu_dereference()() here to force ordering * do not need a rcu_dereference()() here to force ordering
@ -275,8 +274,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
*/ */
p = ids->entries->p[lid]; p = ids->entries->p[lid];
ids->entries->p[lid] = NULL; ids->entries->p[lid] = NULL;
if(p==NULL) BUG_ON(p==NULL);
BUG();
ids->in_use--; ids->in_use--;
if (lid == ids->max_id) { if (lid == ids->max_id) {

View file

@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP) depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
---help--- ---help---
Enable the possibility of suspending the machine. Enable the possibility of suspending the machine.
It doesn't need APM. It doesn't need ACPI or APM.
You may suspend your machine by 'swsusp' or 'shutdown -z <time>' You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
(patch for sysvinit needed). (patch for sysvinit needed).

View file

@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
unsigned long cur_index, start_print; unsigned long cur_index, start_print;
static int msg_level = -1; static int msg_level = -1;
if (((long)(start - end)) > 0) BUG_ON(((long)(start - end)) > 0);
BUG();
cur_index = start; cur_index = start;
start_print = start; start_print = start;
@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
*/ */
void acquire_console_sem(void) void acquire_console_sem(void)
{ {
if (in_interrupt()) BUG_ON(in_interrupt());
BUG();
down(&console_sem); down(&console_sem);
console_locked = 1; console_locked = 1;
console_may_schedule = 1; console_may_schedule = 1;

View file

@ -30,8 +30,7 @@
*/ */
void __ptrace_link(task_t *child, task_t *new_parent) void __ptrace_link(task_t *child, task_t *new_parent)
{ {
if (!list_empty(&child->ptrace_list)) BUG_ON(!list_empty(&child->ptrace_list));
BUG();
if (child->parent == new_parent) if (child->parent == new_parent)
return; return;
list_add(&child->ptrace_list, &child->parent->ptrace_children); list_add(&child->ptrace_list, &child->parent->ptrace_children);

View file

@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{ {
int ret = 0; int ret = 0;
if (!irqs_disabled()) BUG_ON(!irqs_disabled());
BUG();
assert_spin_locked(&t->sighand->siglock); assert_spin_locked(&t->sighand->siglock);
/* Short-circuit ignored signals. */ /* Short-circuit ignored signals. */
@ -1384,8 +1383,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
* the overrun count. Other uses should not try to * the overrun count. Other uses should not try to
* send the signal multiple times. * send the signal multiple times.
*/ */
if (q->info.si_code != SI_TIMER) BUG_ON(q->info.si_code != SI_TIMER);
BUG();
q->info.si_overrun++; q->info.si_overrun++;
goto out; goto out;
} }

View file

@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
* current_fs_time - Return FS time * current_fs_time - Return FS time
* @sb: Superblock. * @sb: Superblock.
* *
* Return the current time truncated to the time granuality supported by * Return the current time truncated to the time granularity supported by
* the fs. * the fs.
*/ */
struct timespec current_fs_time(struct super_block *sb) struct timespec current_fs_time(struct super_block *sb)
@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
EXPORT_SYMBOL(current_fs_time); EXPORT_SYMBOL(current_fs_time);
/** /**
* timespec_trunc - Truncate timespec to a granuality * timespec_trunc - Truncate timespec to a granularity
* @t: Timespec * @t: Timespec
* @gran: Granuality in ns. * @gran: Granularity in ns.
* *
* Truncate a timespec to a granuality. gran must be smaller than a second. * Truncate a timespec to a granularity. gran must be smaller than a second.
* Always rounds down. * Always rounds down.
* *
* This function should be only used for timestamps returned by * This function should be only used for timestamps returned by

View file

@ -1479,8 +1479,7 @@ register_time_interpolator(struct time_interpolator *ti)
unsigned long flags; unsigned long flags;
/* Sanity check */ /* Sanity check */
if (ti->frequency == 0 || ti->mask == 0) BUG_ON(ti->frequency == 0 || ti->mask == 0);
BUG();
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock); spin_lock(&time_interpolator_lock);

View file

@ -74,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
pkmap_count[i] = 0; pkmap_count[i] = 0;
/* sanity check */ /* sanity check */
if (pte_none(pkmap_page_table[i])) BUG_ON(pte_none(pkmap_page_table[i]));
BUG();
/* /*
* Don't need an atomic fetch-and-clear op here; * Don't need an atomic fetch-and-clear op here;
@ -158,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
if (!vaddr) if (!vaddr)
vaddr = map_new_virtual(page); vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++; pkmap_count[PKMAP_NR(vaddr)]++;
if (pkmap_count[PKMAP_NR(vaddr)] < 2) BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
BUG();
spin_unlock(&kmap_lock); spin_unlock(&kmap_lock);
return (void*) vaddr; return (void*) vaddr;
} }
@ -174,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
spin_lock(&kmap_lock); spin_lock(&kmap_lock);
vaddr = (unsigned long)page_address(page); vaddr = (unsigned long)page_address(page);
if (!vaddr) BUG_ON(!vaddr);
BUG();
nr = PKMAP_NR(vaddr); nr = PKMAP_NR(vaddr);
/* /*
@ -220,8 +217,7 @@ static __init int init_emergency_pool(void)
return 0; return 0;
page_pool = mempool_create_page_pool(POOL_SIZE, 0); page_pool = mempool_create_page_pool(POOL_SIZE, 0);
if (!page_pool) BUG_ON(!page_pool);
BUG();
printk("highmem bounce pool size: %d pages\n", POOL_SIZE); printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
return 0; return 0;
@ -264,8 +260,7 @@ int init_emergency_isa_pool(void)
isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0); mempool_free_pages, (void *) 0);
if (!isa_page_pool) BUG_ON(!isa_page_pool);
BUG();
printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE); printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
return 0; return 0;

View file

@ -294,8 +294,7 @@ void validate_mm(struct mm_struct *mm)
i = browse_rb(&mm->mm_rb); i = browse_rb(&mm->mm_rb);
if (i != mm->map_count) if (i != mm->map_count)
printk("map_count %d rb %d\n", mm->map_count, i), bug = 1; printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
if (bug) BUG_ON(bug);
BUG();
} }
#else #else
#define validate_mm(mm) do { } while (0) #define validate_mm(mm) do { } while (0)
@ -432,8 +431,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
struct rb_node ** rb_link, * rb_parent; struct rb_node ** rb_link, * rb_parent;
__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent); __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
if (__vma && __vma->vm_start < vma->vm_end) BUG_ON(__vma && __vma->vm_start < vma->vm_end);
BUG();
__vma_link(mm, vma, prev, rb_link, rb_parent); __vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++; mm->map_count++;
} }
@ -813,8 +811,7 @@ try_prev:
* (e.g. stash info in next's anon_vma_node when assigning * (e.g. stash info in next's anon_vma_node when assigning
* an anon_vma, or when trying vma_merge). Another time. * an anon_vma, or when trying vma_merge). Another time.
*/ */
if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma) BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
BUG();
if (!near) if (!near)
goto none; goto none;

View file

@ -258,7 +258,7 @@ static void balance_dirty_pages(struct address_space *mapping)
/** /**
* balance_dirty_pages_ratelimited_nr - balance dirty memory state * balance_dirty_pages_ratelimited_nr - balance dirty memory state
* @mapping: address_space which was dirtied * @mapping: address_space which was dirtied
* @nr_pages: number of pages which the caller has just dirtied * @nr_pages_dirtied: number of pages which the caller has just dirtied
* *
* Processes which are dirtying memory should call in here once for each page * Processes which are dirtying memory should call in here once for each page
* which was newly dirtied. The function will periodically check the system's * which was newly dirtied. The function will periodically check the system's

View file

@ -1297,8 +1297,7 @@ void __init kmem_cache_init(void)
if (cache_cache.num) if (cache_cache.num)
break; break;
} }
if (!cache_cache.num) BUG_ON(!cache_cache.num);
BUG();
cache_cache.gfporder = order; cache_cache.gfporder = order;
cache_cache.colour = left_over / cache_cache.colour_off; cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@ -1974,8 +1973,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Always checks flags, a caller might be expecting debug support which * Always checks flags, a caller might be expecting debug support which
* isn't available. * isn't available.
*/ */
if (flags & ~CREATE_MASK) BUG_ON(flags & ~CREATE_MASK);
BUG();
/* /*
* Check that size is in terms of words. This is needed to avoid * Check that size is in terms of words. This is needed to avoid
@ -2206,8 +2204,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
slabp = list_entry(l3->slabs_free.prev, struct slab, list); slabp = list_entry(l3->slabs_free.prev, struct slab, list);
#if DEBUG #if DEBUG
if (slabp->inuse) BUG_ON(slabp->inuse);
BUG();
#endif #endif
list_del(&slabp->list); list_del(&slabp->list);
@ -2248,8 +2245,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
*/ */
int kmem_cache_shrink(struct kmem_cache *cachep) int kmem_cache_shrink(struct kmem_cache *cachep)
{ {
if (!cachep || in_interrupt()) BUG_ON(!cachep || in_interrupt());
BUG();
return __cache_shrink(cachep); return __cache_shrink(cachep);
} }
@ -2277,8 +2273,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
int i; int i;
struct kmem_list3 *l3; struct kmem_list3 *l3;
if (!cachep || in_interrupt()) BUG_ON(!cachep || in_interrupt());
BUG();
/* Don't let CPUs to come and go */ /* Don't let CPUs to come and go */
lock_cpu_hotplug(); lock_cpu_hotplug();
@ -2477,8 +2472,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
* Be lazy and only check for valid flags here, keeping it out of the * Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc(). * critical path in kmem_cache_alloc().
*/ */
if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW)) BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
BUG();
if (flags & SLAB_NO_GROW) if (flags & SLAB_NO_GROW)
return 0; return 0;

View file

@ -148,8 +148,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
swp_entry_t entry; swp_entry_t entry;
int err; int err;
if (!PageLocked(page)) BUG_ON(!PageLocked(page));
BUG();
for (;;) { for (;;) {
entry = get_swap_page(); entry = get_swap_page();

View file

@ -321,8 +321,7 @@ void __vunmap(void *addr, int deallocate_pages)
int i; int i;
for (i = 0; i < area->nr_pages; i++) { for (i = 0; i < area->nr_pages; i++) {
if (unlikely(!area->pages[i])) BUG_ON(!area->pages[i]);
BUG();
__free_page(area->pages[i]); __free_page(area->pages[i]);
} }