
Merge ../linux-2.6

James Bottomley 2006-08-27 21:59:59 -05:00
commit 8ce7a9c159
867 changed files with 22111 additions and 9015 deletions

.gitignore
View File

@ -30,6 +30,11 @@ include/config
include/linux/autoconf.h
include/linux/compile.h
include/linux/version.h
include/linux/utsrelease.h
# stgit generated dirs
patches-*
# quilt's files
patches
series

View File

@ -2209,7 +2209,7 @@ S: (address available on request)
S: USA
N: Ian McDonald
E: iam4@cs.waikato.ac.nz
E: ian.mcdonald@jandi.co.nz
E: imcdnzl@gmail.com
W: http://wand.net.nz/~iam4
W: http://imcdnzl.blogspot.com

View File

@ -58,6 +58,9 @@
!Iinclude/linux/ktime.h
!Iinclude/linux/hrtimer.h
!Ekernel/hrtimer.c
</sect1>
<sect1><title>Workqueues and Kevents</title>
!Ekernel/workqueue.c
</sect1>
<sect1><title>Internal Functions</title>
!Ikernel/exit.c
@ -300,7 +303,7 @@ X!Ekernel/module.c
</sect1>
<sect1><title>Resources Management</title>
!Ekernel/resource.c
!Ikernel/resource.c
</sect1>
<sect1><title>MTRR Handling</title>
@ -312,9 +315,7 @@ X!Ekernel/module.c
!Edrivers/pci/pci-driver.c
!Edrivers/pci/remove.c
!Edrivers/pci/pci-acpi.c
<!-- kerneldoc does not understand __devinit
X!Edrivers/pci/search.c
-->
!Edrivers/pci/search.c
!Edrivers/pci/msi.c
!Edrivers/pci/bus.c
<!-- FIXME: Removed for now since no structured comments in source

View File

@ -10,7 +10,9 @@ kernel, the process can sometimes be daunting if you're not familiar
with "the system." This text is a collection of suggestions which
can greatly increase the chances of your change being accepted.
If you are submitting a driver, also read Documentation/SubmittingDrivers.
Read Documentation/SubmitChecklist for a list of items to check
before submitting code. If you are submitting a driver, also read
Documentation/SubmittingDrivers.
@ -74,9 +76,6 @@ There are a number of scripts which can aid in this:
Quilt:
http://savannah.nongnu.org/projects/quilt
Randy Dunlap's patch scripts:
http://www.xenotime.net/linux/scripts/patching-scripts-002.tar.gz
Andrew Morton's patch scripts:
http://www.zip.com.au/~akpm/linux/patches/
Instead of these scripts, quilt is the recommended patch management
@ -484,7 +483,7 @@ Greg Kroah-Hartman "How to piss off a kernel subsystem maintainer".
<http://www.kroah.com/log/2005/10/19/>
<http://www.kroah.com/log/2006/01/11/>
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!.
NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
<http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2>
Kernel Documentation/CodingStyle
@ -493,4 +492,3 @@ Kernel Documentation/CodingStyle
Linus Torvald's mail on the canonical patch format:
<http://lkml.org/lkml/2005/4/7/183>
--
Last updated on 17 Nov 2005.

View File

@ -64,11 +64,13 @@ Compile the kernel with
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASKSTATS=y
Enable the accounting at boot time by adding
the following to the kernel boot options
delayacct
Delay accounting is enabled by default at boot up.
To disable, add
nodelayacct
to the kernel boot options. The rest of the instructions
below assume this has not been done.
and after the system has booted up, use a utility
After the system has booted up, use a utility
similar to getdelays.c to access the delays
seen by a given task or a task group (tgid).
The utility also allows a given command to be

View File

@ -0,0 +1,206 @@
/*
* ucon.c
*
* Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <linux/connector.h>
#define DEBUG
#define NETLINK_CONNECTOR 11
#ifdef DEBUG
#define ulog(f, a...) fprintf(stdout, f, ##a)
#else
#define ulog(f, a...) do {} while (0)
#endif
static int need_exit;
static __u32 seq;
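/*
 * Build a netlink header around the connector message 'msg' and
 * send the result to the kernel over the bound netlink socket 's'.
 */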
static int netlink_send(int s, struct cn_msg *msg)
{
struct nlmsghdr *nlh;
unsigned int size;
int err;
char buf[128];
struct cn_msg *m;
size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
nlh = (struct nlmsghdr *)buf;
nlh->nlmsg_seq = seq++;
nlh->nlmsg_pid = getpid();
nlh->nlmsg_type = NLMSG_DONE;
nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
nlh->nlmsg_flags = 0;
m = NLMSG_DATA(nlh);
#if 0
ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
__func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
#endif
memcpy(m, msg, sizeof(*m) + msg->len);
err = send(s, nlh, size, 0);
if (err == -1)
ulog("Failed to send: %s [%d].\n",
strerror(errno), errno);
return err;
}
int main(int argc, char *argv[])
{
int s;
char buf[1024];
int len;
struct nlmsghdr *reply;
struct sockaddr_nl l_local;
struct cn_msg *data;
FILE *out;
time_t tm;
struct pollfd pfd;
if (argc < 2)
out = stdout;
else {
out = fopen(argv[1], "a+");
if (!out) {
ulog("Unable to open %s for writing: %s\n",
argv[1], strerror(errno));
out = stdout;
}
}
memset(buf, 0, sizeof(buf));
s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (s == -1) {
perror("socket");
return -1;
}
l_local.nl_family = AF_NETLINK;
l_local.nl_groups = 0x123; /* bitmask of requested groups */
l_local.nl_pid = 0;
if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
perror("bind");
close(s);
return -1;
}
#if 0
{
int on = 0x57; /* Additional group number */
setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
}
#endif
if (0) {
int i, j;
memset(buf, 0, sizeof(buf));
data = (struct cn_msg *)buf;
data->id.idx = 0x123;
data->id.val = 0x456;
data->seq = seq++;
data->ack = 0;
data->len = 0;
for (j=0; j<10; ++j) {
for (i=0; i<1000; ++i) {
len = netlink_send(s, data);
}
ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val);
}
return 0;
}
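/*
 * Receive loop: poll the netlink socket and dump each connector
 * message delivered for the subscribed multicast group.
 */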
pfd.fd = s;
while (!need_exit) {
pfd.events = POLLIN;
pfd.revents = 0;
switch (poll(&pfd, 1, -1)) {
case 0:
need_exit = 1;
break;
case -1:
if (errno != EINTR) {
need_exit = 1;
break;
}
continue;
}
if (need_exit)
break;
memset(buf, 0, sizeof(buf));
len = recv(s, buf, sizeof(buf), 0);
if (len == -1) {
perror("recv buf");
close(s);
return -1;
}
reply = (struct nlmsghdr *)buf;
switch (reply->nlmsg_type) {
case NLMSG_ERROR:
fprintf(out, "Error message received.\n");
fflush(out);
break;
case NLMSG_DONE:
data = (struct cn_msg *)NLMSG_DATA(reply);
time(&tm);
fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack);
fflush(out);
break;
default:
break;
}
}
close(s);
return 0;
}

View File

@ -153,10 +153,13 @@ scaling_governor, and by "echoing" the name of another
that some governors won't load - they only
work on some specific architectures or
processors.
scaling_min_freq and
scaling_min_freq and
scaling_max_freq show the current "policy limits" (in
kHz). By echoing new values into these
files, you can change these limits.
NOTE: when setting a policy you need to
first set scaling_max_freq, then
scaling_min_freq.
If you have selected the "userspace" governor which allows you to

View File

@ -251,16 +251,24 @@ A: This is what you would need in your kernel code to receive notifications.
return NOTIFY_OK;
}
static struct notifier_block foobar_cpu_notifer =
static struct notifier_block __cpuinitdata foobar_cpu_notifer =
{
.notifier_call = foobar_cpu_callback,
};
You need to call register_cpu_notifier() from your init function.
Init functions could be of two types:
1. early init (init function called when only the boot processor is online).
2. late init (init function called _after_ all the CPUs are online).
In your init function,
For the first case, you should add the following to your init function
register_cpu_notifier(&foobar_cpu_notifier);
For the second case, you should add the following to your init function
register_hotcpu_notifier(&foobar_cpu_notifier);
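As a purely illustrative sketch (the __init wrapper and the module_init()
hook are assumptions about how the registration is typically wired up, not
part of the example above), a late-init registration might look like:
static int __init foobar_init(void)
{
	/* late init: all CPUs are already online when this runs */
	register_hotcpu_notifier(&foobar_cpu_notifier);
	return 0;
}
module_init(foobar_init);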
You can fail PREPARE notifiers if something doesn't work to prepare resources.
This will stop the activity and send a following CANCELED event back.

View File

@ -217,6 +217,12 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs)
to represent the cpuset hierarchy provides for a familiar permission
and name space for cpusets, with a minimum of additional kernel code.
The cpus file in the root (top_cpuset) cpuset is read-only.
It automatically tracks the value of cpu_online_map, using a CPU
hotplug notifier. If and when memory nodes can be hotplugged,
we expect to make the mems file in the root cpuset read-only
as well, and have it track the value of node_online_map.
1.4 What are exclusive cpusets ?
--------------------------------

View File

@ -2565,10 +2565,10 @@ Your cooperation is appreciated.
243 = /dev/usb/dabusb3 Fourth dabusb device
180 block USB block devices
0 = /dev/uba First USB block device
8 = /dev/ubb Second USB block device
16 = /dev/ubc Thrid USB block device
...
0 = /dev/uba First USB block device
8 = /dev/ubb Second USB block device
16 = /dev/ubc Third USB block device
...
181 char Conrad Electronic parallel port radio clocks
0 = /dev/pcfclock0 First Conrad radio clock

View File

@ -0,0 +1,31 @@
What is imacfb?
===============
This is a generic EFI platform driver for Intel based Apple computers.
Imacfb is only for EFI booted Intel Macs.
Supported Hardware
==================
iMac 17"/20"
Macbook
Macbook Pro 15"/17"
MacMini
How to use it?
==============
Imacfb does not have any kind of autodetection of your machine.
You have to add the following kernel parameters in your elilo.conf:
Macbook :
video=imacfb:macbook
MacMini :
video=imacfb:mini
Macbook Pro 15", iMac 17" :
video=imacfb:i17
Macbook Pro 17", iMac 20" :
video=imacfb:i20
--
Edgar Hucek <gimli@dark-green.com>

View File

@ -62,8 +62,8 @@ ramfs-rootfs-initramfs.txt
- info on the 'in memory' filesystems ramfs, rootfs and initramfs.
reiser4.txt
- info on the Reiser4 filesystem based on dancing tree algorithms.
relayfs.txt
- info on relayfs, for efficient streaming from kernel to user space.
relay.txt
- info on relay, for efficient streaming from kernel to user space.
romfs.txt
- description of the ROMFS filesystem.
smbfs.txt

View File

@ -0,0 +1,479 @@
relay interface (formerly relayfs)
==================================
The relay interface provides a means for kernel applications to
efficiently log and transfer large quantities of data from the kernel
to userspace via user-defined 'relay channels'.
A 'relay channel' is a kernel->user data relay mechanism implemented
as a set of per-cpu kernel buffers ('channel buffers'), each
represented as a regular file ('relay file') in user space. Kernel
clients write into the channel buffers using efficient write
functions; these automatically log into the current cpu's channel
buffer. User space applications mmap() or read() from the relay files
and retrieve the data as it becomes available. The relay files
themselves are files created in a host filesystem, e.g. debugfs, and
are associated with the channel buffers using the API described below.
The format of the data logged into the channel buffers is completely
up to the kernel client; the relay interface does however provide
hooks which allow kernel clients to impose some structure on the
buffer data. The relay interface doesn't implement any form of data
filtering - this also is left to the kernel client. The purpose is to
keep things as simple as possible.
This document provides an overview of the relay interface API. The
details of the function parameters are documented along with the
functions in the relay interface code - please see that for details.
Semantics
=========
Each relay channel has one buffer per CPU, each buffer has one or more
sub-buffers. Messages are written to the first sub-buffer until it is
too full to contain a new message, in which case it is written to
the next (if available). Messages are never split across sub-buffers.
At this point, userspace can be notified so it empties the first
sub-buffer, while the kernel continues writing to the next.
When notified that a sub-buffer is full, the kernel knows how many
bytes of it are padding i.e. unused space occurring because a complete
message couldn't fit into a sub-buffer. Userspace can use this
knowledge to copy only valid data.
After copying it, userspace can notify the kernel that a sub-buffer
has been consumed.
A relay channel can operate in a mode where it will overwrite data not
yet collected by userspace, and not wait for it to be consumed.
The relay channel itself does not provide for communication of such
data between userspace and kernel, allowing the kernel side to remain
simple and not impose a single interface on userspace. It does
provide a set of examples and a separate helper though, described
below.
The read() interface both removes padding and internally consumes the
read sub-buffers; thus in cases where read(2) is being used to drain
the channel buffers, special-purpose communication between kernel and
user isn't necessary for basic operation.
One of the major goals of the relay interface is to provide a low
overhead mechanism for conveying kernel data to userspace. While the
read() interface is easy to use, it's not as efficient as the mmap()
approach; the example code attempts to make the tradeoff between the
two approaches as small as possible.
klog and relay-apps example code
================================
The relay interface itself is ready to use, but to make things easier,
a couple simple utility functions and a set of examples are provided.
The relay-apps example tarball, available on the relay sourceforge
site, contains a set of self-contained examples, each consisting of a
pair of .c files containing boilerplate code for each of the user and
kernel sides of a relay application. When combined these two sets of
boilerplate code provide glue to easily stream data to disk, without
having to bother with mundane housekeeping chores.
The 'klog debugging functions' patch (klog.patch in the relay-apps
tarball) provides a couple of high-level logging functions to the
kernel which allow writing formatted text or raw data to a channel,
regardless of whether a channel to write into exists or not, or even
whether the relay interface is compiled into the kernel or not. These
functions allow you to put unconditional 'trace' statements anywhere
in the kernel or kernel modules; only when there is a 'klog handler'
registered will data actually be logged (see the klog and kleak
examples for details).
It is of course possible to use the relay interface from scratch,
i.e. without using any of the relay-apps example code or klog, but
you'll have to implement communication between userspace and kernel,
allowing both to convey the state of buffers (full, empty, amount of
padding). The read() interface both removes padding and internally
consumes the read sub-buffers; thus in cases where read(2) is being
used to drain the channel buffers, special-purpose communication
between kernel and user isn't necessary for basic operation. Things
such as buffer-full conditions would still need to be communicated via
some channel though.
klog and the relay-apps examples can be found in the relay-apps
tarball on http://relayfs.sourceforge.net
The relay interface user space API
==================================
The relay interface implements basic file operations for user space
access to relay channel buffer data. Here are the file operations
that are available and some comments regarding their behavior:
open() enables user to open an _existing_ channel buffer.
mmap() results in channel buffer being mapped into the caller's
memory space. Note that you can't do a partial mmap - you
must map the entire file, which is NRBUF * SUBBUFSIZE.
read() read the contents of a channel buffer. The bytes read are
'consumed' by the reader, i.e. they won't be available
again to subsequent reads. If the channel is being used
in no-overwrite mode (the default), it can be read at any
time even if there's an active kernel writer. If the
channel is being used in overwrite mode and there are
active channel writers, results may be unpredictable -
users should make sure that all logging to the channel has
ended before using read() with overwrite mode. Sub-buffer
padding is automatically removed and will not be seen by
the reader.
sendfile() transfer data from a channel buffer to an output file
descriptor. Sub-buffer padding is automatically removed
and will not be seen by the reader.
poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
notified when sub-buffer boundaries are crossed.
close() decrements the channel buffer's refcount. When the refcount
reaches 0, i.e. when no process or kernel client has the
buffer open, the channel buffer is freed.
In order for a user application to make use of relay files, the
host filesystem must be mounted. For example,
mount -t debugfs debugfs /debug
NOTE: the host filesystem doesn't need to be mounted for kernel
clients to create or use channels - it only needs to be
mounted when user space applications need access to the buffer
data.
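As a hedged illustration only (the "/debug/cpu0" path and the process()
helper are hypothetical), a user space reader that drains one per-cpu
buffer via read(2) might look like:
	int fd = open("/debug/cpu0", O_RDONLY);
	char buf[4096];
	ssize_t n;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		process(buf, n);	/* padding already removed by read() */
	close(fd);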
The relay interface kernel API
==============================
Here's a summary of the API the relay interface provides to in-kernel clients:
channel management functions:
relay_open(base_filename, parent, subbuf_size, n_subbufs,
callbacks)
relay_close(chan)
relay_flush(chan)
relay_reset(chan)
channel management typically called on instigation of userspace:
relay_subbufs_consumed(chan, cpu, subbufs_consumed)
write functions:
relay_write(chan, data, length)
__relay_write(chan, data, length)
relay_reserve(chan, length)
callbacks:
subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
buf_mapped(buf, filp)
buf_unmapped(buf, filp)
create_buf_file(filename, parent, mode, buf, is_global)
remove_buf_file(dentry)
helper functions:
relay_buf_full(buf)
subbuf_start_reserve(buf, length)
Creating a channel
------------------
relay_open() is used to create a channel, along with its per-cpu
channel buffers. Each channel buffer will have an associated file
created for it in the host filesystem, which can be mmapped or
read from in user space. The files are named basename0...basenameN-1
where N is the number of online cpus, and by default will be created
in the root of the filesystem (if the parent param is NULL). If you
want a directory structure to contain your relay files, you should
create it using the host filesystem's directory creation function,
e.g. debugfs_create_dir(), and pass the parent directory to
relay_open(). Users are responsible for cleaning up any directory
structure they create, when the channel is closed - again the host
filesystem's directory removal functions should be used for that,
e.g. debugfs_remove().
In order for a channel to be created and the host filesystem's files
associated with its channel buffers, the user must provide definitions
for two callback functions, create_buf_file() and remove_buf_file().
create_buf_file() is called once for each per-cpu buffer from
relay_open() and allows the user to create the file which will be used
to represent the corresponding channel buffer. The callback should
return the dentry of the file created to represent the channel buffer.
remove_buf_file() must also be defined; it's responsible for deleting
the file(s) created in create_buf_file() and is called during
relay_close().
Here are some typical definitions for these callbacks, in this case
using debugfs:
/*
* create_buf_file() callback. Creates relay file in debugfs.
*/
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
int mode,
struct rchan_buf *buf,
int *is_global)
{
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
/*
* remove_buf_file() callback. Removes relay file from debugfs.
*/
static int remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
/*
* relay interface callbacks
*/
static struct rchan_callbacks relay_callbacks =
{
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
And an example relay_open() invocation using them:
chan = relay_open("cpu", NULL, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks);
If the create_buf_file() callback fails, or isn't defined, channel
creation and thus relay_open() will fail.
The total size of each per-cpu buffer is calculated by multiplying the
number of sub-buffers by the sub-buffer size passed into relay_open().
The idea behind sub-buffers is that they're basically an extension of
double-buffering to N buffers, and they also allow applications to
easily implement random-access-on-buffer-boundary schemes, which can
be important for some high-volume applications. The number and size
of sub-buffers is completely dependent on the application and even for
the same application, different conditions will warrant different
values for these parameters at different times. Typically, the right
values to use are best decided after some experimentation; in general,
though, it's safe to assume that having only 1 sub-buffer is a bad
idea - you're guaranteed to either overwrite data or lose events
depending on the channel mode being used.
The create_buf_file() implementation can also be defined in such a way
as to allow the creation of a single 'global' buffer instead of the
default per-cpu set. This can be useful for applications interested
mainly in seeing the relative ordering of system-wide events without
the need to bother with saving explicit timestamps for the purpose of
merging/sorting per-cpu files in a postprocessing step.
To have relay_open() create a global buffer, the create_buf_file()
implementation should set the value of the is_global outparam to a
non-zero value in addition to creating the file that will be used to
represent the single buffer. In the case of a global buffer,
create_buf_file() and remove_buf_file() will be called only once. The
normal channel-writing functions, e.g. relay_write(), can still be
used - writes from any cpu will transparently end up in the global
buffer - but since it is a global buffer, callers should make sure
they use the proper locking for such a buffer, either by wrapping
writes in a spinlock, or by copying a write function from relay.h and
creating a local version that internally does the proper locking.
Channel 'modes'
---------------
relay channels can be used in either of two modes - 'overwrite' or
'no-overwrite'. The mode is entirely determined by the implementation
of the subbuf_start() callback, as described below. The default if no
subbuf_start() callback is defined is 'no-overwrite' mode. If the
default mode suits your needs, and you plan to use the read()
interface to retrieve channel data, you can ignore the details of this
section, as it pertains mainly to mmap() implementations.
In 'overwrite' mode, also known as 'flight recorder' mode, writes
continuously cycle around the buffer and will never fail, but will
unconditionally overwrite old data regardless of whether it's actually
been consumed. In no-overwrite mode, writes will fail, i.e. data will
be lost, if the number of unconsumed sub-buffers equals the total
number of sub-buffers in the channel. It should be clear that if
there is no consumer or if the consumer can't consume sub-buffers fast
enough, data will be lost in either case; the only difference is
whether data is lost from the beginning or the end of a buffer.
As explained above, a relay channel is made up of one or more
per-cpu channel buffers, each implemented as a circular buffer
subdivided into one or more sub-buffers. Messages are written into
the current sub-buffer of the channel's current per-cpu buffer via the
write functions described below. Whenever a message can't fit into
the current sub-buffer, because there's no room left for it, the
client is notified via the subbuf_start() callback that a switch to a
new sub-buffer is about to occur. The client uses this callback to 1)
initialize the next sub-buffer if appropriate 2) finalize the previous
sub-buffer if appropriate and 3) return a boolean value indicating
whether or not to actually move on to the next sub-buffer.
To implement 'no-overwrite' mode, the userspace client would provide
an implementation of the subbuf_start() callback something like the
following:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
if (relay_buf_full(buf))
return 0;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
If the current buffer is full, i.e. all sub-buffers remain unconsumed,
the callback returns 0 to indicate that the buffer switch should not
occur yet, i.e. until the consumer has had a chance to read the
current set of ready sub-buffers. For the relay_buf_full() function
to make sense, the consumer is responsible for notifying the relay
interface when sub-buffers have been consumed via
relay_subbufs_consumed(). Any subsequent attempts to write into the
buffer will again invoke the subbuf_start() callback with the same
parameters; only when the consumer has consumed one or more of the
ready sub-buffers will relay_buf_full() return 0, in which case the
buffer switch can continue.
The implementation of the subbuf_start() callback for 'overwrite' mode
would be very similar:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
In this case, the relay_buf_full() check is meaningless and the
callback always returns 1, causing the buffer switch to occur
unconditionally. It's also meaningless for the client to use the
relay_subbufs_consumed() function in this mode, as it's never
consulted.
The default subbuf_start() implementation, used if the client doesn't
define any callbacks, or doesn't define the subbuf_start() callback,
implements the simplest possible 'no-overwrite' mode, i.e. it does
nothing but return 0.
Header information can be reserved at the beginning of each sub-buffer
by calling the subbuf_start_reserve() helper function from within the
subbuf_start() callback. This reserved area can be used to store
whatever information the client wants. In the example above, room is
reserved in each sub-buffer to store the padding count for that
sub-buffer. This is filled in for the previous sub-buffer in the
subbuf_start() implementation; the padding value for the previous
sub-buffer is passed into the subbuf_start() callback along with a
pointer to the previous sub-buffer, since the padding value isn't
known until a sub-buffer is filled. The subbuf_start() callback is
also called for the first sub-buffer when the channel is opened, to
give the client a chance to reserve space in it. In this case the
previous sub-buffer pointer passed into the callback will be NULL, so
the client should check the value of the prev_subbuf pointer before
writing into the previous sub-buffer.
Writing to a channel
--------------------
Kernel clients write data into the current cpu's channel buffer using
relay_write() or __relay_write(). relay_write() is the main logging
function - it uses local_irqsave() to protect the buffer and should be
used if you might be logging from interrupt context. If you know
you'll never be logging from interrupt context, you can use
__relay_write(), which only disables preemption. These functions
don't return a value, so you can't determine whether or not they
failed - the assumption is that you wouldn't want to check a return
value in the fast logging path anyway, and that they'll always succeed
unless the buffer is full and no-overwrite mode is being used, in
which case you can detect a failed write in the subbuf_start()
callback by calling the relay_buf_full() helper function.
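As a purely illustrative sketch (the 'chan' pointer, 'event_id' and the
message format are hypothetical), a logging call might look like:
	char msg[64];
	int len;
	len = snprintf(msg, sizeof(msg), "event %d at %lu\n", event_id, jiffies);
	relay_write(chan, msg, len);	/* safe even from interrupt context */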
relay_reserve() is used to reserve a slot in a channel buffer which
can be written to later. This would typically be used in applications
that need to write directly into a channel buffer without having to
stage data in a temporary buffer beforehand. Because the actual write
may not happen immediately after the slot is reserved, applications
using relay_reserve() can keep a count of the number of bytes actually
written, either in space reserved in the sub-buffers themselves or as
a separate array. See the 'reserve' example in the relay-apps tarball
at http://relayfs.sourceforge.net for an example of how this can be
done. Because the write is under control of the client and is
separated from the reserve, relay_reserve() doesn't protect the buffer
at all - it's up to the client to provide the appropriate
synchronization when using relay_reserve().
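For illustration, a reserve-then-fill sequence might look like the sketch
below; 'chan', 'struct my_record' and the fields filled in are hypothetical:
	struct my_record *rec;
	rec = relay_reserve(chan, sizeof(*rec));
	if (rec) {		/* NULL means the slot couldn't be reserved */
		/* write directly into the reserved slot, no staging buffer */
		rec->seq = next_seq++;
		rec->nbytes = payload_len;
	}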
Closing a channel
-----------------
The client calls relay_close() when it's finished using the channel.
The channel and its associated buffers are destroyed when there are no
longer any references to any of the channel buffers. relay_flush()
forces a sub-buffer switch on all the channel buffers, and can be used
to finalize and process the last sub-buffers before the channel is
closed.
Misc
----
Some applications may want to keep a channel around and re-use it
rather than open and close a new channel for each use. relay_reset()
can be used for this purpose - it resets a channel to its initial
state without reallocating channel buffer memory or destroying
existing mappings. It should however only be called when it's safe to
do so, i.e. when the channel isn't currently being written to.
Finally, there are a couple of utility callbacks that can be used for
different purposes. buf_mapped() is called whenever a channel buffer
is mmapped from user space and buf_unmapped() is called when it's
unmapped. The client can use this notification to trigger actions
within the kernel application, such as enabling/disabling logging to
the channel.
Resources
=========
For news, example code, mailing list, etc. see the relay interface homepage:
http://relayfs.sourceforge.net
Credits
=======
The ideas and specs for the relay interface came about as a result of
discussions on tracing involving the following:
Michel Dagenais <michel.dagenais@polymtl.ca>
Richard Moore <richardj_moore@uk.ibm.com>
Bob Wisniewski <bob@watson.ibm.com>
Karim Yaghmour <karim@opersys.com>
Tom Zanussi <zanussi@us.ibm.com>
Also thanks to Hubertus Franke for a lot of useful suggestions and bug
reports.

View File

@ -1,442 +0,0 @@
relayfs - a high-speed data relay filesystem
============================================
relayfs is a filesystem designed to provide an efficient mechanism for
tools and facilities to relay large and potentially sustained streams
of data from kernel space to user space.
The main abstraction of relayfs is the 'channel'. A channel consists
of a set of per-cpu kernel buffers each represented by a file in the
relayfs filesystem. Kernel clients write into a channel using
efficient write functions which automatically log to the current cpu's
channel buffer. User space applications mmap() the per-cpu files and
retrieve the data as it becomes available.
The format of the data logged into the channel buffers is completely
up to the relayfs client; relayfs does however provide hooks which
allow clients to impose some structure on the buffer data. Nor does
relayfs implement any form of data filtering - this also is left to
the client. The purpose is to keep relayfs as simple as possible.
This document provides an overview of the relayfs API. The details of
the function parameters are documented along with the functions in the
filesystem code - please see that for details.
Semantics
=========
Each relayfs channel has one buffer per CPU, each buffer has one or
more sub-buffers. Messages are written to the first sub-buffer until
it is too full to contain a new message, in which case it it is
written to the next (if available). Messages are never split across
sub-buffers. At this point, userspace can be notified so it empties
the first sub-buffer, while the kernel continues writing to the next.
When notified that a sub-buffer is full, the kernel knows how many
bytes of it are padding i.e. unused. Userspace can use this knowledge
to copy only valid data.
After copying it, userspace can notify the kernel that a sub-buffer
has been consumed.
relayfs can operate in a mode where it will overwrite data not yet
collected by userspace, and not wait for it to consume it.
relayfs itself does not provide for communication of such data between
userspace and kernel, allowing the kernel side to remain simple and
not impose a single interface on userspace. It does provide a set of
examples and a separate helper though, described below.
klog and relay-apps example code
================================
relayfs itself is ready to use, but to make things easier, a couple
simple utility functions and a set of examples are provided.
The relay-apps example tarball, available on the relayfs sourceforge
site, contains a set of self-contained examples, each consisting of a
pair of .c files containing boilerplate code for each of the user and
kernel sides of a relayfs application; combined these two sets of
boilerplate code provide glue to easily stream data to disk, without
having to bother with mundane housekeeping chores.
The 'klog debugging functions' patch (klog.patch in the relay-apps
tarball) provides a couple of high-level logging functions to the
kernel which allow writing formatted text or raw data to a channel,
regardless of whether a channel to write into exists or not, or
whether relayfs is compiled into the kernel or is configured as a
module. These functions allow you to put unconditional 'trace'
statements anywhere in the kernel or kernel modules; only when there
is a 'klog handler' registered will data actually be logged (see the
klog and kleak examples for details).
It is of course possible to use relayfs from scratch i.e. without
using any of the relay-apps example code or klog, but you'll have to
implement communication between userspace and kernel, allowing both to
convey the state of buffers (full, empty, amount of padding).
klog and the relay-apps examples can be found in the relay-apps
tarball on http://relayfs.sourceforge.net
The relayfs user space API
==========================
relayfs implements basic file operations for user space access to
relayfs channel buffer data. Here are the file operations that are
available and some comments regarding their behavior:
open() enables user to open an _existing_ buffer.
mmap() results in channel buffer being mapped into the caller's
memory space. Note that you can't do a partial mmap - you must
map the entire file, which is NRBUF * SUBBUFSIZE.
read() read the contents of a channel buffer. The bytes read are
'consumed' by the reader i.e. they won't be available again
to subsequent reads. If the channel is being used in
no-overwrite mode (the default), it can be read at any time
even if there's an active kernel writer. If the channel is
being used in overwrite mode and there are active channel
writers, results may be unpredictable - users should make
sure that all logging to the channel has ended before using
read() with overwrite mode.
poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
notified when sub-buffer boundaries are crossed.
close() decrements the channel buffer's refcount. When the refcount
reaches 0 i.e. when no process or kernel client has the buffer
open, the channel buffer is freed.
In order for a user application to make use of relayfs files, the
relayfs filesystem must be mounted. For example,
mount -t relayfs relayfs /mnt/relay
NOTE: relayfs doesn't need to be mounted for kernel clients to create
or use channels - it only needs to be mounted when user space
applications need access to the buffer data.
The relayfs kernel API
======================
Here's a summary of the API relayfs provides to in-kernel clients:
channel management functions:
relay_open(base_filename, parent, subbuf_size, n_subbufs,
callbacks)
relay_close(chan)
relay_flush(chan)
relay_reset(chan)
relayfs_create_dir(name, parent)
relayfs_remove_dir(dentry)
relayfs_create_file(name, parent, mode, fops, data)
relayfs_remove_file(dentry)
channel management typically called on instigation of userspace:
relay_subbufs_consumed(chan, cpu, subbufs_consumed)
write functions:
relay_write(chan, data, length)
__relay_write(chan, data, length)
relay_reserve(chan, length)
callbacks:
subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
buf_mapped(buf, filp)
buf_unmapped(buf, filp)
create_buf_file(filename, parent, mode, buf, is_global)
remove_buf_file(dentry)
helper functions:
relay_buf_full(buf)
subbuf_start_reserve(buf, length)
Creating a channel
------------------
relay_open() is used to create a channel, along with its per-cpu
channel buffers. Each channel buffer will have an associated file
created for it in the relayfs filesystem, which can be opened and
mmapped from user space if desired. The files are named
basename0...basenameN-1 where N is the number of online cpus, and by
default will be created in the root of the filesystem. If you want a
directory structure to contain your relayfs files, you can create it
with relayfs_create_dir() and pass the parent directory to
relay_open(). Clients are responsible for cleaning up any directory
structure they create when the channel is closed - use
relayfs_remove_dir() for that.
The total size of each per-cpu buffer is calculated by multiplying the
number of sub-buffers by the sub-buffer size passed into relay_open().
The idea behind sub-buffers is that they're basically an extension of
double-buffering to N buffers, and they also allow applications to
easily implement random-access-on-buffer-boundary schemes, which can
be important for some high-volume applications. The number and size
of sub-buffers is completely dependent on the application and even for
the same application, different conditions will warrant different
values for these parameters at different times. Typically, the right
values to use are best decided after some experimentation; in general,
though, it's safe to assume that having only 1 sub-buffer is a bad
idea - you're guaranteed to either overwrite data or lose events
depending on the channel mode being used.
Channel 'modes'
---------------
relayfs channels can be used in either of two modes - 'overwrite' or
'no-overwrite'. The mode is entirely determined by the implementation
of the subbuf_start() callback, as described below. In 'overwrite'
mode, also known as 'flight recorder' mode, writes continuously cycle
around the buffer and will never fail, but will unconditionally
overwrite old data regardless of whether it's actually been consumed.
In no-overwrite mode, writes will fail i.e. data will be lost, if the
number of unconsumed sub-buffers equals the total number of
sub-buffers in the channel. It should be clear that if there is no
consumer or if the consumer can't consume sub-buffers fast enought,
data will be lost in either case; the only difference is whether data
is lost from the beginning or the end of a buffer.
As explained above, a relayfs channel is made of up one or more
per-cpu channel buffers, each implemented as a circular buffer
subdivided into one or more sub-buffers. Messages are written into
the current sub-buffer of the channel's current per-cpu buffer via the
write functions described below. Whenever a message can't fit into
the current sub-buffer, because there's no room left for it, the
client is notified via the subbuf_start() callback that a switch to a
new sub-buffer is about to occur. The client uses this callback to 1)
initialize the next sub-buffer if appropriate 2) finalize the previous
sub-buffer if appropriate and 3) return a boolean value indicating
whether or not to actually go ahead with the sub-buffer switch.
To implement 'no-overwrite' mode, the userspace client would provide
an implementation of the subbuf_start() callback something like the
following:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
if (relay_buf_full(buf))
return 0;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
If the current buffer is full i.e. all sub-buffers remain unconsumed,
the callback returns 0 to indicate that the buffer switch should not
occur yet i.e. until the consumer has had a chance to read the current
set of ready sub-buffers. For the relay_buf_full() function to make
sense, the consumer is reponsible for notifying relayfs when
sub-buffers have been consumed via relay_subbufs_consumed(). Any
subsequent attempts to write into the buffer will again invoke the
subbuf_start() callback with the same parameters; only when the
consumer has consumed one or more of the ready sub-buffers will
relay_buf_full() return 0, in which case the buffer switch can
continue.
The implementation of the subbuf_start() callback for 'overwrite' mode
would be very similar:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
In this case, the relay_buf_full() check is meaningless and the
callback always returns 1, causing the buffer switch to occur
unconditionally. It's also meaningless for the client to use the
relay_subbufs_consumed() function in this mode, as it's never
consulted.
The default subbuf_start() implementation, used if the client doesn't
define any callbacks, or doesn't define the subbuf_start() callback,
implements the simplest possible 'no-overwrite' mode i.e. it does
nothing but return 0.
Header information can be reserved at the beginning of each sub-buffer
by calling the subbuf_start_reserve() helper function from within the
subbuf_start() callback. This reserved area can be used to store
whatever information the client wants. In the example above, room is
reserved in each sub-buffer to store the padding count for that
sub-buffer. This is filled in for the previous sub-buffer in the
subbuf_start() implementation; the padding value for the previous
sub-buffer is passed into the subbuf_start() callback along with a
pointer to the previous sub-buffer, since the padding value isn't
known until a sub-buffer is filled. The subbuf_start() callback is
also called for the first sub-buffer when the channel is opened, to
give the client a chance to reserve space in it. In this case the
previous sub-buffer pointer passed into the callback will be NULL, so
the client should check the value of the prev_subbuf pointer before
writing into the previous sub-buffer.
Writing to a channel
--------------------
kernel clients write data into the current cpu's channel buffer using
relay_write() or __relay_write(). relay_write() is the main logging
function - it uses local_irqsave() to protect the buffer and should be
used if you might be logging from interrupt context. If you know
you'll never be logging from interrupt context, you can use
__relay_write(), which only disables preemption. These functions
don't return a value, so you can't determine whether or not they
failed - the assumption is that you wouldn't want to check a return
value in the fast logging path anyway, and that they'll always succeed
unless the buffer is full and no-overwrite mode is being used, in
which case you can detect a failed write in the subbuf_start()
callback by calling the relay_buf_full() helper function.
relay_reserve() is used to reserve a slot in a channel buffer which
can be written to later. This would typically be used in applications
that need to write directly into a channel buffer without having to
stage data in a temporary buffer beforehand. Because the actual write
may not happen immediately after the slot is reserved, applications
using relay_reserve() can keep a count of the number of bytes actually
written, either in space reserved in the sub-buffers themselves or as
a separate array. See the 'reserve' example in the relay-apps tarball
at http://relayfs.sourceforge.net for an example of how this can be
done. Because the write is under control of the client and is
separated from the reserve, relay_reserve() doesn't protect the buffer
at all - it's up to the client to provide the appropriate
synchronization when using relay_reserve().
Closing a channel
-----------------
The client calls relay_close() when it's finished using the channel.
The channel and its associated buffers are destroyed when there are no
longer any references to any of the channel buffers. relay_flush()
forces a sub-buffer switch on all the channel buffers, and can be used
to finalize and process the last sub-buffers before the channel is
closed.
Creating non-relay files
------------------------
relay_open() automatically creates files in the relayfs filesystem to
represent the per-cpu kernel buffers; it's often useful for
applications to be able to create their own files alongside the relay
files in the relayfs filesystem as well e.g. 'control' files much like
those created in /proc or debugfs for similar purposes, used to
communicate control information between the kernel and user sides of a
relayfs application. For this purpose the relayfs_create_file() and
relayfs_remove_file() API functions exist. For relayfs_create_file(),
the caller passes in a set of user-defined file operations to be used
for the file and an optional void * to a user-specified data item,
which will be accessible via inode->u.generic_ip (see the relay-apps
tarball for examples). The file_operations are a required parameter
to relayfs_create_file() and thus the semantics of these files are
completely defined by the caller.
See the relay-apps tarball at http://relayfs.sourceforge.net for
examples of how these non-relay files are meant to be used.
Creating relay files in other filesystems
-----------------------------------------
By default of course, relay_open() creates relay files in the relayfs
filesystem. Because relay_file_operations is exported, however, it's
also possible to create and use relay files in other pseudo-filesytems
such as debugfs.
For this purpose, two callback functions are provided,
create_buf_file() and remove_buf_file(). create_buf_file() is called
once for each per-cpu buffer from relay_open() to allow the client to
create a file to be used to represent the corresponding buffer; if
this callback is not defined, the default implementation will create
and return a file in the relayfs filesystem to represent the buffer.
The callback should return the dentry of the file created to represent
the relay buffer. Note that the parent directory passed to
relay_open() (and passed along to the callback), if specified, must
exist in the same filesystem the new relay file is created in. If
create_buf_file() is defined, remove_buf_file() must also be defined;
it's responsible for deleting the file(s) created in create_buf_file()
and is called during relay_close().
The create_buf_file() implementation can also be defined in such a way
as to allow the creation of a single 'global' buffer instead of the
default per-cpu set. This can be useful for applications interested
mainly in seeing the relative ordering of system-wide events without
the need to bother with saving explicit timestamps for the purpose of
merging/sorting per-cpu files in a postprocessing step.
To have relay_open() create a global buffer, the create_buf_file()
implementation should set the value of the is_global outparam to a
non-zero value in addition to creating the file that will be used to
represent the single buffer. In the case of a global buffer,
create_buf_file() and remove_buf_file() will be called only once. The
normal channel-writing functions e.g. relay_write() can still be used
- writes from any cpu will transparently end up in the global buffer -
but since it is a global buffer, callers should make sure they use the
proper locking for such a buffer, either by wrapping writes in a
spinlock, or by copying a write function from relayfs_fs.h and
creating a local version that internally does the proper locking.
See the 'exported-relayfile' examples in the relay-apps tarball for
examples of creating and using relay files in debugfs.
Misc
----
Some applications may want to keep a channel around and re-use it
rather than open and close a new channel for each use. relay_reset()
can be used for this purpose - it resets a channel to its initial
state without reallocating channel buffer memory or destroying
existing mappings. It should however only be called when it's safe to
do so i.e. when the channel isn't currently being written to.
Finally, there are a couple of utility callbacks that can be used for
different purposes. buf_mapped() is called whenever a channel buffer
is mmapped from user space and buf_unmapped() is called when it's
unmapped. The client can use this notification to trigger actions
within the kernel application, such as enabling/disabling logging to
the channel.
Resources
=========
For news, example code, mailing list, etc. see the relayfs homepage:
http://relayfs.sourceforge.net
Credits
=======
The ideas and specs for relayfs came about as a result of discussions
on tracing involving the following:
Michel Dagenais <michel.dagenais@polymtl.ca>
Richard Moore <richardj_moore@uk.ibm.com>
Bob Wisniewski <bob@watson.ibm.com>
Karim Yaghmour <karim@opersys.com>
Tom Zanussi <zanussi@us.ibm.com>
Also thanks to Hubertus Franke for a lot of useful suggestions and bug
reports.

View File

@ -51,8 +51,6 @@ Debugging Information
References
IETF IP over InfiniBand (ipoib) Working Group
http://ietf.org/html.charters/ipoib-charter.html
Transmission of IP over InfiniBand (IPoIB) (RFC 4391)
http://ietf.org/rfc/rfc4391.txt
IP over InfiniBand (IPoIB) Architecture (RFC 4392)

View File

@ -72,6 +72,22 @@ initrd adds the following new options:
initrd is mounted as root, and the normal boot procedure is followed,
with the RAM disk still mounted as root.
Compressed cpio images
----------------------
Recent kernels have support for populating a ramdisk from a compressed cpio
archive. On such systems, the creation of a ramdisk image doesn't need to
involve special block devices or loopback mounts; you merely create a directory
on disk with the desired initrd content, cd to that directory, and run (as an
example):
find . | cpio --quiet -c -o | gzip -9 -n > /boot/imagefile.img
Examining the contents of an existing image file is just as simple:
mkdir /tmp/imagefile
cd /tmp/imagefile
gzip -cd /boot/imagefile.img | cpio -imd --quiet
Installation
------------

View File

@ -39,7 +39,6 @@ them. Bug reports and success stories are also welcome.
The input project website is at:
http://www.suse.cz/development/input/
http://atrey.karlin.mff.cuni.cz/~vojtech/input/
There is also a mailing list for the driver at:

View File

@ -407,6 +407,20 @@ more details, with real examples.
The second argument is optional, and if supplied will be used
if first argument is not supported.
ld-option
ld-option is used to check if $(CC), when used to link object files,
supports the given option. An optional second option may be
specified for use if the first option is not supported.
Example:
#arch/i386/kernel/Makefile
vsyscall-flags += $(call ld-option, -Wl$(comma)--hash-style=sysv)
In the above example vsyscall-flags will be assigned the option
-Wl$(comma)--hash-style=sysv if it is supported by $(CC).
The second argument is optional, and if supplied will be used
if first argument is not supported.
cc-option
cc-option is used to check if $(CC) supports a given option, and if
not, to use an optional second option.

View File

@ -448,8 +448,6 @@ running once the system is up.
Format: <area>[,<node>]
See also Documentation/networking/decnet.txt.
delayacct [KNL] Enable per-task delay accounting
dhash_entries= [KNL]
Set number of hash buckets for dentry cache.
@ -1031,6 +1029,8 @@ running once the system is up.
nocache [ARM]
nodelayacct [KNL] Disable per-task delay accounting
nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects.
noexec [IA-64]

View File

@ -247,7 +247,7 @@ the object-specific fields, which include:
- default_attrs: Default attributes to be exported via sysfs when the
object is registered. Note that the last attribute has to be
initialized to NULL! You can find a complete implementation
in drivers/block/genhd.c
in block/genhd.c
Instances of struct kobj_type are not registered; only referenced by

View File

@ -294,15 +294,15 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
Default: 87380*2 bytes.
tcp_mem - vector of 3 INTEGERs: min, pressure, max
low: below this number of pages TCP is not bothered about its
min: below this number of pages TCP is not bothered about its
memory appetite.
pressure: when amount of memory allocated by TCP exceeds this number
of pages, TCP moderates its memory consumption and enters memory
pressure mode, which is exited when memory consumption falls
under "low".
under "min".
high: number of pages allowed for queueing by all TCP sockets.
max: number of pages allowed for queueing by all TCP sockets.
Defaults are calculated at boot time from amount of available
memory.

View File

@ -1196,7 +1196,7 @@ platforms are moved over to use the flattened-device-tree model.
- model : Model of the device. Can be "TSEC", "eTSEC", or "FEC"
- compatible : Should be "gianfar"
- reg : Offset and length of the register set for the device
- address : List of bytes representing the ethernet address of
- mac-address : List of bytes representing the ethernet address of
this controller
- interrupts : <a b> where a is the interrupt number and b is a
field that represents an encoding of the sense and level
@ -1216,7 +1216,7 @@ platforms are moved over to use the flattened-device-tree model.
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
address = [ 00 E0 0C 00 73 00 ];
mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 3 e 3 12 3>;
interrupt-parent = <40000>;
phy-handle = <2452000>
@ -1498,7 +1498,7 @@ not necessary as they are usually the same as the root node.
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
address = [ 00 E0 0C 00 73 00 ];
mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 3 e 3 12 3>;
interrupt-parent = <40000>;
phy-handle = <2452000>;
@ -1511,7 +1511,7 @@ not necessary as they are usually the same as the root node.
model = "TSEC";
compatible = "gianfar";
reg = <25000 1000>;
address = [ 00 E0 0C 00 73 01 ];
mac-address = [ 00 E0 0C 00 73 01 ];
interrupts = <13 3 14 3 18 3>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
@ -1524,7 +1524,7 @@ not necessary as they are usually the same as the root node.
model = "FEC";
compatible = "gianfar";
reg = <26000 1000>;
address = [ 00 E0 0C 00 73 02 ];
mac-address = [ 00 E0 0C 00 73 02 ];
interrupts = <19 3>;
interrupt-parent = <40000>;
phy-handle = <2452002>;

View File

@ -1,3 +1,126 @@
Release Date : Fri May 19 09:31:45 EST 2006 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
1. Fixed a bug in megaraid_init_mbox().
Customer reported "garbage in file on x86_64 platform".
Root Cause: the driver registered controllers as 64-bit DMA capable
even for those which do not support it.
Fix: Made a change in the function, inserting an identification mechanism
to identify 64-bit DMA capable controllers.
> -----Original Message-----
> From: Vasily Averin [mailto:vvs@sw.ru]
> Sent: Thursday, May 04, 2006 2:49 PM
> To: linux-scsi@vger.kernel.org; Kolli, Neela; Mukker, Atul;
> Ju, Seokmann; Bagalkote, Sreenivas;
> James.Bottomley@SteelEye.com; devel@openvz.org
> Subject: megaraid_mbox: garbage in file
>
> Hello all,
>
> I've investigated customers claim on the unstable work of
> their node and found a
> strange effect: reading from some files leads to the
> "attempt to access beyond end of device" messages.
>
> I've checked filesystem, memory on the node, motherboard BIOS
> version, but it
> does not help and issue still has been reproduced by simple
> file reading.
>
> Reproducer is simple:
>
> echo 0xffffffff >/proc/sys/dev/scsi/logging_level ;
> cat /vz/private/101/root/etc/ld.so.cache >/tmp/ttt ;
> echo 0 >/proc/sys/dev/scsi/logging
>
> It leads to the following messages in dmesg
>
> sd_init_command: disk=sda, block=871769260, count=26
> sda : block=871769260
> sda : reading 26/26 512 byte blocks.
> scsi_add_timer: scmd: f79ed980, time: 7500, (c02b1420)
> sd 0:1:0:0: send 0xf79ed980 sd 0:1:0:0:
> command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
> buffer = 0xf7cfb540, bufflen = 13312, done = 0xc0366b40,
> queuecommand 0xc0344010
> leaving scsi_dispatch_cmnd()
> scsi_delete_timer: scmd: f79ed980, rtn: 1
> sd 0:1:0:0: done 0xf79ed980 SUCCESS 0 sd 0:1:0:0:
> command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
> scsi host busy 1 failed 0
> sd 0:1:0:0: Notifying upper driver of completion (result 0)
> sd_rw_intr: sda: res=0x0
> 26 sectors total, 13312 bytes done.
> use_sg is 4
> attempt to access beyond end of device
> sda6: rw=0, want=1044134458, limit=951401367
> Buffer I/O error on device sda6, logical block 522067228
> attempt to access beyond end of device
2. When an INQUIRY with the EVPD bit set is issued to the MegaRAID controller,
system memory gets corrupted.
Root Cause: MegaRAID F/W handles the INQUIRY with the EVPD bit set
incorrectly.
Fix: MegaRAID F/W has fixed the problem and the fix is in the process of
being released. Meanwhile, the driver will filter out such requests.
3. One member of a data structure in the driver leads to an unaligned
access issue on 64-bit platforms.
Customer reported a "kernel unaligned access address" issue when an
application communicates with the MegaRAID HBA driver.
Root Cause: in the uioc_t structure, one member was misaligned, and it
led the system to display the error message.
Fix: A patch was submitted to the community by the following folk.
> -----Original Message-----
> From: linux-scsi-owner@vger.kernel.org
> [mailto:linux-scsi-owner@vger.kernel.org] On Behalf Of Sakurai Hiroomi
> Sent: Wednesday, July 12, 2006 4:20 AM
> To: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: Re: Help: strange messages from kernel on IA64 platform
>
> Hi,
>
> I saw same message.
>
> When GAM(Global Array Manager) is started, The following
> message output.
> kernel: kernel unaligned access to 0xe0000001fe1080d4,
> ip=0xa000000200053371
>
> The uioc structure used by ioctl is defined by packed,
> the allignment of each member are disturbed.
> In a 64 bit structure, the allignment of member doesn't fit 64 bit
> boundary. this causes this messages.
> In a 32 bit structure, we don't see the message because the allinment
> of member fit 32 bit boundary even if packed is specified.
>
> patch
> I Add 32 bit dummy member to fit 64 bit boundary. I tested.
> We confirmed this patch fix the problem by IA64 server.
>
> **************************************************************
> ****************
> --- linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h.orig
> 2006-04-03 17:13:03.000000000 +0900
> +++ linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h
> 2006-04-03 17:14:09.000000000 +0900
> @@ -132,6 +132,10 @@
> /* Driver Data: */
> void __user * user_data;
> uint32_t user_data_len;
> +
> + /* 64bit alignment */
> + uint32_t pad_0xBC;
> +
> mraid_passthru_t __user *user_pthru;
>
> mraid_passthru_t *pthru32;
> **************************************************************
> ****************
Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)

View File

@ -25,6 +25,7 @@ Currently, these files are in /proc/sys/fs:
- inode-state
- overflowuid
- overflowgid
- suid_dumpable
- super-max
- super-nr
@ -131,6 +132,25 @@ The default is 65534.
==============================================================
suid_dumpable:
This value can be used to query and set the core dump mode for setuid
or otherwise protected/tainted binaries. The modes are
0 - (default) - traditional behaviour. Any process which has changed
privilege levels or is execute only will not be dumped
1 - (debug) - all processes dump core when possible. The core dump is
owned by the current user and no security is applied. This is
intended for system debugging situations only. Ptrace is unchecked.
2 - (suidsafe) - any binary which normally would not be dumped is dumped
readable by root only. This allows the end user to remove
such a dump but not access it directly. For security reasons
core dumps in this mode will not overwrite one another or
other files. This mode is appropriate when administrators are
attempting to debug problems in a normal environment.
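For example, a brief shell sketch of switching to the suidsafe mode and
checking the result, assuming the value is exposed as
/proc/sys/fs/suid_dumpable on this kernel:

    # mode 2 (suidsafe): dumps from protected binaries, readable by root only
    echo 2 > /proc/sys/fs/suid_dumpable

    # confirm the current mode
    cat /proc/sys/fs/suid_dumpable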
==============================================================
super-max & super-nr:
These numbers control the maximum number of superblocks, and

View File

@ -50,7 +50,6 @@ show up in /proc/sys/kernel:
- shmmax [ sysv ipc ]
- shmmni
- stop-a [ SPARC only ]
- suid_dumpable
- sysrq ==> Documentation/sysrq.txt
- tainted
- threads-max
@ -211,9 +210,8 @@ Controls the kernel's behaviour when an oops or BUG is encountered.
0: try to continue operation
1: delay a few seconds (to give klogd time to record the oops output) and
then panic. If the `panic' sysctl is also non-zero then the machine will
be rebooted.
1: panic immediately. If the `panic' sysctl is also non-zero then the
machine will be rebooted.
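A short shell sketch, assuming this describes the panic_on_oops sysctl under
/proc/sys/kernel:

    # panic as soon as an oops or BUG is hit
    echo 1 > /proc/sys/kernel/panic_on_oops

    # and, via the related `panic' sysctl, reboot 30 seconds after the panic
    echo 30 > /proc/sys/kernel/panic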
==============================================================
@ -311,25 +309,6 @@ kernel. This value defaults to SHMMAX.
==============================================================
suid_dumpable:
This value can be used to query and set the core dump mode for setuid
or otherwise protected/tainted binaries. The modes are
0 - (default) - traditional behaviour. Any process which has changed
privilege levels or is execute only will not be dumped
1 - (debug) - all processes dump core when possible. The core dump is
owned by the current user and no security is applied. This is
intended for system debugging situations only. Ptrace is unchecked.
2 - (suidsafe) - any binary which normally would not be dumped is dumped
readable by root only. This allows the end user to remove
such a dump but not access it directly. For security reasons
core dumps in this mode will not overwrite one another or
other files. This mode is appropriate when administrators are
attempting to debug problems in a normal environment.
==============================================================
tainted:
Non-zero if the kernel has been tainted. Numeric values, which

View File

@ -59,7 +59,7 @@ bind to an interface (or perhaps several) using an ioctl call. You
would issue more ioctls to the device to communicate to it using
control, bulk, or other kinds of USB transfers. The IOCTLs are
listed in the <linux/usbdevice_fs.h> file, and at this writing the
source code (linux/drivers/usb/devio.c) is the primary reference
source code (linux/drivers/usb/core/devio.c) is the primary reference
for how to access devices through those files.
Note that since by default these BBB/DDD files are writable only by

View File

@ -5,8 +5,7 @@ For USB help other than the readme files that are located in
Documentation/usb/*, see the following:
Linux-USB project: http://www.linux-usb.org
mirrors at http://www.suse.cz/development/linux-usb/
and http://usb.in.tum.de/linux-usb/
mirrors at http://usb.in.tum.de/linux-usb/
and http://it.linux-usb.org
Linux USB Guide: http://linux-usb.sourceforge.net
Linux-USB device overview (working devices and drivers):

View File

@ -238,6 +238,13 @@ Debugging
pagefaulttrace Dump all page faults. Only useful for extreme debugging
and will create a lot of output.
call_trace=[old|both|newfallback|new]
old: use old inexact backtracer
new: use new exact dwarf2 unwinder
both: print entries from both
newfallback: use new unwinder but fall back to old if it gets
stuck (default)
Misc
noreplacement Don't replace instructions with more appropriate ones

View File

@ -214,6 +214,12 @@ W: http://acpi.sourceforge.net/
T: git kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
S: Maintained
ACPI PCI HOTPLUG DRIVER
P: Kristen Carlson Accardi
M: kristen.c.accardi@intel.com
L: pcihpd-discuss@lists.sourceforge.net
S: Maintained
AD1816 SOUND DRIVER
P: Thorsten Knabe
M: Thorsten Knabe <linux@thorsten-knabe.de>
@ -292,6 +298,13 @@ L: info-linux@geode.amd.com
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
AOA (Apple Onboard Audio) ALSA DRIVER
P: Johannes Berg
M: johannes@sipsolutions.net
L: linuxppc-dev@ozlabs.org
L: alsa-devel@alsa-project.org
S: Maintained
APM DRIVER
P: Stephen Rothwell
M: sfr@canb.auug.org.au
@ -876,6 +889,12 @@ M: rdunlap@xenotime.net
T: git http://tali.admingilde.org/git/linux-docbook.git
S: Maintained
DOCKING STATION DRIVER
P: Kristen Carlson Accardi
M: kristen.c.accardi@intel.com
L: linux-acpi@vger.kernel.org
S: Maintained
DOUBLETALK DRIVER
P: James R. Van Zandt
M: jrv@vanzandt.mv.com
@ -968,6 +987,10 @@ P: Andrey V. Savochkin
M: saw@saw.sw.com.sg
S: Maintained
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
EMU10K1 SOUND DRIVER
P: James Courtier-Dutton
M: James@superbug.demon.co.uk
@ -1598,7 +1621,7 @@ W: http://jfs.sourceforge.net/
T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
S: Supported
JOURNALLING LAYER FOR BLOCK DEVICS (JBD)
JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
P: Stephen Tweedie, Andrew Morton
M: sct@redhat.com, akpm@osdl.org
L: ext2-devel@lists.sourceforge.net
@ -1642,9 +1665,8 @@ S: Maintained
KERNEL JANITORS
P: Several
L: kernel-janitors@osdl.org
L: kernel-janitors@lists.osdl.org
W: http://www.kerneljanitors.org/
W: http://sf.net/projects/kernel-janitor/
S: Maintained
KERNEL NFSD
@ -1882,6 +1904,12 @@ S: linux-scsi@vger.kernel.org
W: http://megaraid.lsilogic.com
S: Maintained
MEMORY MANAGEMENT
L: linux-mm@kvack.org
L: linux-kernel@vger.kernel.org
W: http://www.linux-mm.org
S: Maintained
MEMORY TECHNOLOGY DEVICES (MTD)
P: David Woodhouse
M: dwmw2@infradead.org
@ -2626,6 +2654,22 @@ M: dbrownell@users.sourceforge.net
L: spi-devel-general@lists.sourceforge.net
S: Maintained
STABLE BRANCH:
P: Greg Kroah-Hartman
M: greg@kroah.com
P: Chris Wright
M: chrisw@sous-sol.org
L: stable@kernel.org
S: Maintained
STABLE BRANCH:
P: Greg Kroah-Hartman
M: greg@kroah.com
P: Chris Wright
M: chrisw@sous-sol.org
L: stable@kernel.org
S: Maintained
TPM DEVICE DRIVER
P: Kylene Hall
M: kjhall@us.ibm.com

View File

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 18
EXTRAVERSION = -rc2
EXTRAVERSION = -rc4
NAME=Crazed Snow-Weasel
# *DOCUMENTATION*
@ -309,9 +309,6 @@ CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common
# Force gcc to behave correct even for buggy distributions
CFLAGS += $(call cc-option, -fno-stack-protector-all \
-fno-stack-protector)
AFLAGS := -D__ASSEMBLY__
# Read KERNELRELEASE from include/config/kernel.release (if it exists)
@ -368,6 +365,7 @@ endif
no-dot-config-targets := clean mrproper distclean \
cscope TAGS tags help %docs check% \
include/linux/version.h headers_% \
kernelrelease kernelversion
config-targets := 0
@ -435,12 +433,13 @@ core-y := usr/
endif # KBUILD_EXTMOD
ifeq ($(dot-config),1)
# In this section, we need .config
# Read in config
-include include/config/auto.conf
ifeq ($(KBUILD_EXTMOD),)
# Read in dependencies to all Kconfig* files, make sure to run
# oldconfig if changes are detected.
-include include/config/auto.conf.cmd
-include include/config/auto.conf
# To avoid any implicit rule to kick in, define an empty command
$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
@ -450,16 +449,27 @@ $(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
# if auto.conf.cmd is missing then we are probably in a cleaned tree so
# we execute the config step to be sure to catch updated Kconfig files
include/config/auto.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
ifeq ($(KBUILD_EXTMOD),)
$(Q)$(MAKE) -f $(srctree)/Makefile silentoldconfig
else
$(error kernel configuration not valid - run 'make prepare' in $(srctree) to update it)
endif
# external modules needs include/linux/autoconf.h and include/config/auto.conf
# but do not care if they are up-to-date. Use auto.conf to trigger the test
PHONY += include/config/auto.conf
include/config/auto.conf:
$(Q)test -e include/linux/autoconf.h -a -e $@ || ( \
echo; \
echo " ERROR: Kernel configuration is invalid."; \
echo " include/linux/autoconf.h or $@ are missing."; \
echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \
echo; \
/bin/false)
endif # KBUILD_EXTMOD
else
# Dummy target needed, because used as prerequisite
include/config/auto.conf: ;
endif
endif # $(dot-config)
# The all: target is the default when no target is given on the
# command line.
@ -473,6 +483,8 @@ else
CFLAGS += -O2
endif
include $(srctree)/arch/$(ARCH)/Makefile
ifdef CONFIG_FRAME_POINTER
CFLAGS += -fno-omit-frame-pointer $(call cc-option,-fno-optimize-sibling-calls,)
else
@ -487,7 +499,8 @@ ifdef CONFIG_DEBUG_INFO
CFLAGS += -g
endif
include $(srctree)/arch/$(ARCH)/Makefile
# Force gcc to behave correct even for buggy distributions
CFLAGS += $(call cc-option, -fno-stack-protector)
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)

View File

@ -274,16 +274,14 @@ ev7_process_pal_subpacket(struct el_subpacket *header)
struct el_subpacket_handler ev7_pal_subpacket_handler =
SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
void
void
ev7_register_error_handlers(void)
{
int i;
for(i = 0;
i<sizeof(el_ev7_pal_annotations)/sizeof(el_ev7_pal_annotations[1]);
i++) {
for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++)
cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
}
cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
}

View File

@ -623,12 +623,12 @@ osf_sysinfo(int command, char __user *buf, long count)
long len, err = -EINVAL;
offset = command-1;
if (offset >= sizeof(sysinfo_table)/sizeof(char *)) {
if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
goto out;
}
down_read(&uts_sem);
res = sysinfo_table[offset];
len = strlen(res)+1;

View File

@ -114,8 +114,6 @@ struct alpha_machine_vector alpha_mv;
int alpha_using_srm;
#endif
#define N(a) (sizeof(a)/sizeof(a[0]))
static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
unsigned long);
static struct alpha_machine_vector *get_sysvec_byname(const char *);
@ -240,7 +238,7 @@ reserve_std_resources(void)
standard_io_resources[0].start = RTC_PORT(0);
standard_io_resources[0].end = RTC_PORT(0) + 0x10;
for (i = 0; i < N(standard_io_resources); ++i)
for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
request_resource(io, standard_io_resources+i);
}
@ -918,13 +916,13 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
/* Search the system tables first... */
vec = NULL;
if (type < N(systype_vecs)) {
if (type < ARRAY_SIZE(systype_vecs)) {
vec = systype_vecs[type];
} else if ((type > ST_API_BIAS) &&
(type - ST_API_BIAS) < N(api_vecs)) {
(type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
vec = api_vecs[type - ST_API_BIAS];
} else if ((type > ST_UNOFFICIAL_BIAS) &&
(type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
(type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
}
@ -938,11 +936,11 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
switch (type) {
case ST_DEC_ALCOR:
if (member < N(alcor_indices))
if (member < ARRAY_SIZE(alcor_indices))
vec = alcor_vecs[alcor_indices[member]];
break;
case ST_DEC_EB164:
if (member < N(eb164_indices))
if (member < ARRAY_SIZE(eb164_indices))
vec = eb164_vecs[eb164_indices[member]];
/* PC164 may show as EB164 variation with EV56 CPU,
but, since no true EB164 had anything but EV5... */
@ -950,24 +948,24 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
vec = &pc164_mv;
break;
case ST_DEC_EB64P:
if (member < N(eb64p_indices))
if (member < ARRAY_SIZE(eb64p_indices))
vec = eb64p_vecs[eb64p_indices[member]];
break;
case ST_DEC_EB66:
if (member < N(eb66_indices))
if (member < ARRAY_SIZE(eb66_indices))
vec = eb66_vecs[eb66_indices[member]];
break;
case ST_DEC_MARVEL:
if (member < N(marvel_indices))
if (member < ARRAY_SIZE(marvel_indices))
vec = marvel_vecs[marvel_indices[member]];
break;
case ST_DEC_TITAN:
vec = titan_vecs[0]; /* default */
if (member < N(titan_indices))
if (member < ARRAY_SIZE(titan_indices))
vec = titan_vecs[titan_indices[member]];
break;
case ST_DEC_TSUNAMI:
if (member < N(tsunami_indices))
if (member < ARRAY_SIZE(tsunami_indices))
vec = tsunami_vecs[tsunami_indices[member]];
break;
case ST_DEC_1000:
@ -1039,7 +1037,7 @@ get_sysvec_byname(const char *name)
size_t i;
for (i = 0; i < N(all_vecs); ++i) {
for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
struct alpha_machine_vector *mv = all_vecs[i];
if (strcasecmp(mv->vector_name, name) == 0)
return mv;
@ -1055,13 +1053,13 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
/* If not in the tables, make it UNKNOWN,
else set type name to family */
if (type < N(systype_names)) {
if (type < ARRAY_SIZE(systype_names)) {
*type_name = systype_names[type];
} else if ((type > ST_API_BIAS) &&
(type - ST_API_BIAS) < N(api_names)) {
(type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
*type_name = api_names[type - ST_API_BIAS];
} else if ((type > ST_UNOFFICIAL_BIAS) &&
(type - ST_UNOFFICIAL_BIAS) < N(unofficial_names)) {
(type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
*type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
} else {
*type_name = sys_unknown;
@ -1083,7 +1081,7 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
default: /* default to variation "0" for now */
break;
case ST_DEC_EB164:
if (member < N(eb164_indices))
if (member < ARRAY_SIZE(eb164_indices))
*variation_name = eb164_names[eb164_indices[member]];
/* PC164 may show as EB164 variation, but with EV56 CPU,
so, since no true EB164 had anything but EV5... */
@ -1091,32 +1089,32 @@ get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
*variation_name = eb164_names[1]; /* make it PC164 */
break;
case ST_DEC_ALCOR:
if (member < N(alcor_indices))
if (member < ARRAY_SIZE(alcor_indices))
*variation_name = alcor_names[alcor_indices[member]];
break;
case ST_DEC_EB64P:
if (member < N(eb64p_indices))
if (member < ARRAY_SIZE(eb64p_indices))
*variation_name = eb64p_names[eb64p_indices[member]];
break;
case ST_DEC_EB66:
if (member < N(eb66_indices))
if (member < ARRAY_SIZE(eb66_indices))
*variation_name = eb66_names[eb66_indices[member]];
break;
case ST_DEC_MARVEL:
if (member < N(marvel_indices))
if (member < ARRAY_SIZE(marvel_indices))
*variation_name = marvel_names[marvel_indices[member]];
break;
case ST_DEC_RAWHIDE:
if (member < N(rawhide_indices))
if (member < ARRAY_SIZE(rawhide_indices))
*variation_name = rawhide_names[rawhide_indices[member]];
break;
case ST_DEC_TITAN:
*variation_name = titan_names[0]; /* default */
if (member < N(titan_indices))
if (member < ARRAY_SIZE(titan_indices))
*variation_name = titan_names[titan_indices[member]];
break;
case ST_DEC_TSUNAMI:
if (member < N(tsunami_indices))
if (member < ARRAY_SIZE(tsunami_indices))
*variation_name = tsunami_names[tsunami_indices[member]];
break;
}
@ -1211,7 +1209,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
cpu_index = (unsigned) (cpu->type - 1);
cpu_name = "Unknown";
if (cpu_index < N(cpu_names))
if (cpu_index < ARRAY_SIZE(cpu_names))
cpu_name = cpu_names[cpu_index];
get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,

View File

@ -182,16 +182,16 @@ static unsigned long __init
ruffian_get_bank_size(unsigned long offset)
{
unsigned long bank_addr, bank, ret = 0;
/* Valid offsets are: 0x800, 0x840 and 0x880
since Ruffian only uses three banks. */
bank_addr = (unsigned long)PYXIS_MCR + offset;
bank = *(vulp)bank_addr;
/* Check BANK_ENABLE */
if (bank & 0x01) {
static unsigned long size[] __initdata = {
0x40000000UL, /* 0x00, 1G */
0x40000000UL, /* 0x00, 1G */
0x20000000UL, /* 0x02, 512M */
0x10000000UL, /* 0x04, 256M */
0x08000000UL, /* 0x06, 128M */
@ -203,7 +203,7 @@ ruffian_get_bank_size(unsigned long offset)
};
bank = (bank & 0x1e) >> 1;
if (bank < sizeof(size)/sizeof(*size))
if (bank < ARRAY_SIZE(size))
ret = size[bank];
}

View File

@ -233,7 +233,7 @@ validate_cc_value(unsigned long cc)
index = cpu->type & 0xffffffff;
/* If index out of bounds, no way to validate. */
if (index >= sizeof(cpu_hz)/sizeof(cpu_hz[0]))
if (index >= ARRAY_SIZE(cpu_hz))
return cc;
/* If index contains no data, no way to validate. */

View File

@ -179,17 +179,19 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
struct safe_buffer *b = NULL;
struct safe_buffer *b, *rb = NULL;
unsigned long flags;
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
if (b->safe_dma_addr == safe_dma_addr)
if (b->safe_dma_addr == safe_dma_addr) {
rb = b;
break;
}
read_unlock_irqrestore(&device_info->lock, flags);
return b;
return rb;
}
static inline void

View File

@ -95,7 +95,8 @@ static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
}
#endif
static struct irqchip gic_chip = {
static struct irq_chip gic_chip = {
.name = "GIC",
.ack = gic_ack_irq,
.mask = gic_mask_irq,
.unmask = gic_unmask_irq,

View File

@ -204,7 +204,8 @@ static void locomo_unmask_irq(unsigned int irq)
locomo_writel(r, mapbase + LOCOMO_ICR);
}
static struct irqchip locomo_chip = {
static struct irq_chip locomo_chip = {
.name = "LOCOMO",
.ack = locomo_ack_irq,
.mask = locomo_mask_irq,
.unmask = locomo_unmask_irq,
@ -249,7 +250,8 @@ static void locomo_key_unmask_irq(unsigned int irq)
locomo_writel(r, mapbase + LOCOMO_KEYBOARD + LOCOMO_KIC);
}
static struct irqchip locomo_key_chip = {
static struct irq_chip locomo_key_chip = {
.name = "LOCOMO-key",
.ack = locomo_key_ack_irq,
.mask = locomo_key_mask_irq,
.unmask = locomo_key_unmask_irq,
@ -312,7 +314,8 @@ static void locomo_gpio_unmask_irq(unsigned int irq)
locomo_writel(r, mapbase + LOCOMO_GIE);
}
static struct irqchip locomo_gpio_chip = {
static struct irq_chip locomo_gpio_chip = {
.name = "LOCOMO-gpio",
.ack = locomo_gpio_ack_irq,
.mask = locomo_gpio_mask_irq,
.unmask = locomo_gpio_unmask_irq,
@ -357,7 +360,8 @@ static void locomo_lt_unmask_irq(unsigned int irq)
locomo_writel(r, mapbase + LOCOMO_LTINT);
}
static struct irqchip locomo_lt_chip = {
static struct irq_chip locomo_lt_chip = {
.name = "LOCOMO-lt",
.ack = locomo_lt_ack_irq,
.mask = locomo_lt_mask_irq,
.unmask = locomo_lt_unmask_irq,
@ -418,7 +422,8 @@ static void locomo_spi_unmask_irq(unsigned int irq)
locomo_writel(r, mapbase + LOCOMO_SPIIE);
}
static struct irqchip locomo_spi_chip = {
static struct irq_chip locomo_spi_chip = {
.name = "LOCOMO-spi",
.ack = locomo_spi_ack_irq,
.mask = locomo_spi_mask_irq,
.unmask = locomo_spi_unmask_irq,

View File

@ -68,6 +68,7 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc
rtc_time_to_tm(next_time, next);
}
}
EXPORT_SYMBOL(rtc_next_alarm_time);
static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm)
{

View File

@ -272,7 +272,8 @@ static int sa1111_wake_lowirq(unsigned int irq, unsigned int on)
return 0;
}
static struct irqchip sa1111_low_chip = {
static struct irq_chip sa1111_low_chip = {
.name = "SA1111-l",
.ack = sa1111_ack_irq,
.mask = sa1111_mask_lowirq,
.unmask = sa1111_unmask_lowirq,
@ -368,7 +369,8 @@ static int sa1111_wake_highirq(unsigned int irq, unsigned int on)
return 0;
}
static struct irqchip sa1111_high_chip = {
static struct irq_chip sa1111_high_chip = {
.name = "SA1111-h",
.ack = sa1111_ack_irq,
.mask = sa1111_mask_highirq,
.unmask = sa1111_unmask_highirq,

View File

@ -39,7 +39,8 @@ static void vic_unmask_irq(unsigned int irq)
writel(1 << irq, base + VIC_INT_ENABLE);
}
static struct irqchip vic_chip = {
static struct irq_chip vic_chip = {
.name = "VIC",
.ack = vic_mask_irq,
.mask = vic_mask_irq,
.unmask = vic_unmask_irq,

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.18-rc1
# Sun Jul 9 15:21:30 2006
# Linux kernel version: 2.6.18-rc1-git9
# Sat Jul 15 15:08:10 2006
#
CONFIG_ARM=y
CONFIG_MMU=y
@ -30,6 +30,7 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
CONFIG_IKCONFIG=y
@ -749,7 +750,7 @@ CONFIG_VIDEO_V4L2=y
# USB support
#
CONFIG_USB_ARCH_HAS_HCD=y
# CONFIG_USB_ARCH_HAS_OHCI is not set
CONFIG_USB_ARCH_HAS_OHCI=y
# CONFIG_USB_ARCH_HAS_EHCI is not set
CONFIG_USB=y
CONFIG_USB_DEBUG=y
@ -766,6 +767,9 @@ CONFIG_USB_DYNAMIC_MINORS=y
# USB Host Controller Drivers
#
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_SL811_HCD is not set
#
@ -855,6 +859,7 @@ CONFIG_USB_SERIAL_CONSOLE=y
CONFIG_USB_SERIAL_PL2303=y
# CONFIG_USB_SERIAL_HP4X is not set
# CONFIG_USB_SERIAL_SAFE is not set
# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
# CONFIG_USB_SERIAL_TI is not set
# CONFIG_USB_SERIAL_CYBERJACK is not set
# CONFIG_USB_SERIAL_XIRCOM is not set
@ -871,7 +876,7 @@ CONFIG_USB_SERIAL_PL2303=y
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CY7C63 is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGETKIT is not set
# CONFIG_USB_PHIDGETSERVO is not set
@ -916,6 +921,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_X1205 is not set
# CONFIG_RTC_DRV_DS1307 is not set
# CONFIG_RTC_DRV_DS1553 is not set
# CONFIG_RTC_DRV_ISL1208 is not set
# CONFIG_RTC_DRV_DS1672 is not set
# CONFIG_RTC_DRV_DS1742 is not set
# CONFIG_RTC_DRV_PCF8563 is not set
@ -1023,7 +1029,6 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_CIFS_DEBUG2 is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set

View File

@ -470,7 +470,8 @@ static void ecard_irq_mask(unsigned int irqnr)
}
}
static struct irqchip ecard_chip = {
static struct irq_chip ecard_chip = {
.name = "ECARD",
.ack = ecard_irq_mask,
.mask = ecard_irq_mask,
.unmask = ecard_irq_unmask,

View File

@ -634,6 +634,14 @@ ENTRY(__switch_to)
* purpose.
*/
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
bx \reg
#else
mov pc, \reg
#endif
.endm
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@ -675,7 +683,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
mov pc, lr
usr_ret lr
.align 5
@ -778,7 +786,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
mov r0, #-1
adds r0, r0, #0
#endif
mov pc, lr
usr_ret lr
#else
@ -792,7 +800,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
#ifdef CONFIG_SMP
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
mov pc, lr
usr_ret lr
#endif
@ -834,16 +842,11 @@ __kuser_cmpxchg: @ 0xffff0fc0
__kuser_get_tls: @ 0xffff0fe0
#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
mov pc, lr
#else
mrc p15, 0, r0, c13, c0, 3 @ read TLS register
mov pc, lr
#endif
usr_ret lr
.rep 5
.word 0 @ pad up to __kuser_helper_version

View File

@ -114,18 +114,18 @@ ENTRY(secondary_startup)
* Use the page tables supplied from __cpu_up.
*/
adr r4, __secondary_data
ldmia r4, {r5, r6, r13} @ address to jump to after
ldmia r4, {r5, r7, r13} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r6, r4] @ get secondary_data.pgdir
ldr r4, [r7, r4] @ get secondary_data.pgdir
adr lr, __enable_mmu @ return address
add pc, r10, #12 @ initialise processor
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
@ (return control reg)
/*
* r6 = &secondary_data
*/
ENTRY(__secondary_switched)
ldr sp, [r6, #4] @ get secondary_data.stack
ldr sp, [r7, #4] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel

View File

@ -77,6 +77,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
seq_printf(p, " %10s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);

View File

@ -232,11 +232,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
if (panic_on_oops)
panic("Fatal exception");
}
do_exit(SIGSEGV);
}

View File

@ -327,7 +327,8 @@ static int gpio_irq_type(unsigned pin, unsigned type)
return (type == IRQT_BOTHEDGE) ? 0 : -EINVAL;
}
static struct irqchip gpio_irqchip = {
static struct irq_chip gpio_irqchip = {
.name = "GPIO",
.mask = gpio_irq_mask,
.unmask = gpio_irq_unmask,
.set_type = gpio_irq_type,

View File

@ -114,7 +114,8 @@ void at91_irq_resume(void)
#define at91_aic_set_wake NULL
#endif
static struct irqchip at91_aic_chip = {
static struct irq_chip at91_aic_chip = {
.name = "AIC",
.ack = at91_aic_mask_irq,
.mask = at91_aic_mask_irq,
.unmask = at91_aic_unmask_irq,

View File

@ -8,7 +8,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/screen_info.h>
#include <asm/hardware/dec21285.h>
#include <asm/io.h>

View File

@ -204,13 +204,15 @@ imx_gpiod_demux_handler(unsigned int irq_unused, struct irqdesc *desc,
imx_gpio_handler(mask, irq, desc, regs);
}
static struct irqchip imx_internal_chip = {
static struct irq_chip imx_internal_chip = {
.name = "MPU",
.ack = imx_mask_irq,
.mask = imx_mask_irq,
.unmask = imx_unmask_irq,
};
static struct irqchip imx_gpio_chip = {
static struct irq_chip imx_gpio_chip = {
.name = "GPIO",
.ack = imx_gpio_ack_irq,
.mask = imx_gpio_mask_irq,
.unmask = imx_gpio_unmask_irq,

View File

@ -161,7 +161,8 @@ static void sc_unmask_irq(unsigned int irq)
writel(1 << irq, VA_IC_BASE + IRQ_ENABLE_SET);
}
static struct irqchip sc_chip = {
static struct irq_chip sc_chip = {
.name = "SC",
.ack = sc_mask_irq,
.mask = sc_mask_irq,
.unmask = sc_unmask_irq,

View File

@ -156,7 +156,8 @@ static void cic_unmask_irq(unsigned int irq)
cic_writel(1 << irq, INTCP_VA_CIC_BASE + IRQ_ENABLE_SET);
}
static struct irqchip cic_chip = {
static struct irq_chip cic_chip = {
.name = "CIC",
.ack = cic_mask_irq,
.mask = cic_mask_irq,
.unmask = cic_unmask_irq,
@ -174,7 +175,8 @@ static void pic_unmask_irq(unsigned int irq)
pic_writel(1 << irq, INTCP_VA_PIC_BASE + IRQ_ENABLE_SET);
}
static struct irqchip pic_chip = {
static struct irq_chip pic_chip = {
.name = "PIC",
.ack = pic_mask_irq,
.mask = pic_mask_irq,
.unmask = pic_unmask_irq,
@ -192,7 +194,8 @@ static void sic_unmask_irq(unsigned int irq)
sic_writel(1 << irq, INTCP_VA_SIC_BASE + IRQ_ENABLE_SET);
}
static struct irqchip sic_chip = {
static struct irq_chip sic_chip = {
.name = "SIC",
.ack = sic_mask_irq,
.mask = sic_mask_irq,
.unmask = sic_unmask_irq,

View File

@ -52,7 +52,8 @@ iop321_irq_unmask (unsigned int irq)
intctl_write(iop321_mask);
}
struct irqchip ext_chip = {
struct irq_chip ext_chip = {
.name = "IOP",
.ack = iop321_irq_mask,
.mask = iop321_irq_mask,
.unmask = iop321_irq_unmask,

View File

@ -77,13 +77,15 @@ iop331_irq_unmask2(unsigned int irq)
intctl_write1(iop331_mask1);
}
struct irqchip iop331_irqchip1 = {
struct irq_chip iop331_irqchip1 = {
.name = "IOP-1",
.ack = iop331_irq_mask1,
.mask = iop331_irq_mask1,
.unmask = iop331_irq_unmask1,
};
struct irqchip iop331_irqchip2 = {
struct irq_chip iop331_irqchip2 = {
.name = "IOP-2",
.ack = iop331_irq_mask2,
.mask = iop331_irq_mask2,
.unmask = iop331_irq_unmask2,

View File

@ -532,8 +532,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
}
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);

View File

@ -107,9 +107,9 @@ static struct flash_platform_data gtwx5715_flash_data = {
.width = 2,
};
static struct gtw5715_flash_resource = {
static struct resource gtwx5715_flash_resource = {
.flags = IORESOURCE_MEM,
}
};
static struct platform_device gtwx5715_flash = {
.name = "IXP4XX-Flash",
@ -130,9 +130,6 @@ static void __init gtwx5715_init(void)
{
ixp4xx_sys_init();
if (!flash_resource)
printk(KERN_ERR "Could not allocate flash resource\n");
gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1;

View File

@ -63,7 +63,8 @@ static void kev7a400_unmask_cpld_irq (u32 irq)
CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
}
static struct irqchip kev7a400_cpld_chip = {
static struct irq_chip kev7a400_cpld_chip = {
.name = "CPLD",
.ack = kev7a400_ack_cpld_irq,
.mask = kev7a400_mask_cpld_irq,
.unmask = kev7a400_unmask_cpld_irq,

View File

@ -200,7 +200,8 @@ static void lh7a40x_unmask_cpld_irq (u32 irq)
}
}
static struct irqchip lpd7a40x_cpld_chip = {
static struct irq_chip lpd7a40x_cpld_chip = {
.name = "CPLD",
.ack = lh7a40x_ack_cpld_irq,
.mask = lh7a40x_mask_cpld_irq,
.unmask = lh7a40x_unmask_cpld_irq,

View File

@ -43,7 +43,8 @@ lh7a400_unmask_cpld_irq (u32 irq)
}
static struct
irqchip lh7a400_cpld_chip = {
irq_chip lh7a400_cpld_chip = {
.name = "CPLD",
.ack = lh7a400_ack_cpld_irq,
.mask = lh7a400_mask_cpld_irq,
.unmask = lh7a400_unmask_cpld_irq,

View File

@ -38,13 +38,15 @@ static void lh7a400_ack_gpio_irq (u32 irq)
INTC_INTENC = (1 << irq);
}
static struct irqchip lh7a400_internal_chip = {
static struct irq_chip lh7a400_internal_chip = {
.name = "MPU",
.ack = lh7a400_mask_irq, /* Level triggering -> mask is ack */
.mask = lh7a400_mask_irq,
.unmask = lh7a400_unmask_irq,
};
static struct irqchip lh7a400_gpio_chip = {
static struct irq_chip lh7a400_gpio_chip = {
.name = "GPIO",
.ack = lh7a400_ack_gpio_irq,
.mask = lh7a400_mask_irq,
.unmask = lh7a400_unmask_irq,

View File

@ -76,25 +76,29 @@ static void lh7a404_vic2_ack_gpio_irq (u32 irq)
VIC2_INTENCLR = (1 << irq);
}
static struct irqchip lh7a404_vic1_chip = {
static struct irq_chip lh7a404_vic1_chip = {
.name = "VIC1",
.ack = lh7a404_vic1_mask_irq, /* Because level-triggered */
.mask = lh7a404_vic1_mask_irq,
.unmask = lh7a404_vic1_unmask_irq,
};
static struct irqchip lh7a404_vic2_chip = {
static struct irq_chip lh7a404_vic2_chip = {
.name = "VIC2",
.ack = lh7a404_vic2_mask_irq, /* Because level-triggered */
.mask = lh7a404_vic2_mask_irq,
.unmask = lh7a404_vic2_unmask_irq,
};
static struct irqchip lh7a404_gpio_vic1_chip = {
static struct irq_chip lh7a404_gpio_vic1_chip = {
.name = "GPIO-VIC1",
.ack = lh7a404_vic1_ack_gpio_irq,
.mask = lh7a404_vic1_mask_irq,
.unmask = lh7a404_vic1_unmask_irq,
};
static struct irqchip lh7a404_gpio_vic2_chip = {
static struct irq_chip lh7a404_gpio_vic2_chip = {
.name = "GPIO-VIC2",
.ack = lh7a404_vic2_ack_gpio_irq,
.mask = lh7a404_vic2_mask_irq,
.unmask = lh7a404_vic2_unmask_irq,

View File

@ -50,7 +50,8 @@ static void lh7a40x_unmask_cpld_irq (u32 irq)
}
}
static struct irqchip lh7a40x_cpld_chip = {
static struct irq_chip lh7a40x_cpld_chip = {
.name = "CPLD",
.ack = lh7a40x_ack_cpld_irq,
.mask = lh7a40x_mask_cpld_irq,
.unmask = lh7a40x_unmask_cpld_irq,

View File

@ -106,14 +106,16 @@ void innovator_fpga_IRQ_demux(unsigned int irq, struct irqdesc *desc,
}
}
static struct irqchip omap_fpga_irq_ack = {
static struct irq_chip omap_fpga_irq_ack = {
.name = "FPGA-ack",
.ack = fpga_mask_ack_irq,
.mask = fpga_mask_irq,
.unmask = fpga_unmask_irq,
};
static struct irqchip omap_fpga_irq = {
static struct irq_chip omap_fpga_irq = {
.name = "FPGA",
.ack = fpga_ack_irq,
.mask = fpga_mask_irq,
.unmask = fpga_unmask_irq,

View File

@ -168,7 +168,8 @@ static struct omap_irq_bank omap1610_irq_banks[] = {
};
#endif
static struct irqchip omap_irq_chip = {
static struct irq_chip omap_irq_chip = {
.name = "MPU",
.ack = omap_mask_ack_irq,
.mask = omap_mask_irq,
.unmask = omap_unmask_irq,

View File

@ -94,7 +94,8 @@ static void omap_mask_ack_irq(unsigned int irq)
omap_ack_irq(irq);
}
static struct irqchip omap_irq_chip = {
static struct irq_chip omap_irq_chip = {
.name = "INTC",
.ack = omap_mask_ack_irq,
.mask = omap_mask_irq,
.unmask = omap_unmask_irq,

View File

@ -39,7 +39,8 @@ static void pxa_unmask_low_irq(unsigned int irq)
ICMR |= (1 << (irq + PXA_IRQ_SKIP));
}
static struct irqchip pxa_internal_chip_low = {
static struct irq_chip pxa_internal_chip_low = {
.name = "SC",
.ack = pxa_mask_low_irq,
.mask = pxa_mask_low_irq,
.unmask = pxa_unmask_low_irq,
@ -61,7 +62,8 @@ static void pxa_unmask_high_irq(unsigned int irq)
ICMR2 |= (1 << (irq - 32 + PXA_IRQ_SKIP));
}
static struct irqchip pxa_internal_chip_high = {
static struct irq_chip pxa_internal_chip_high = {
.name = "SC-hi",
.ack = pxa_mask_high_irq,
.mask = pxa_mask_high_irq,
.unmask = pxa_unmask_high_irq,
@ -129,7 +131,8 @@ static void pxa_ack_low_gpio(unsigned int irq)
GEDR0 = (1 << (irq - IRQ_GPIO0));
}
static struct irqchip pxa_low_gpio_chip = {
static struct irq_chip pxa_low_gpio_chip = {
.name = "GPIO-l",
.ack = pxa_ack_low_gpio,
.mask = pxa_mask_low_irq,
.unmask = pxa_unmask_low_irq,
@ -237,7 +240,8 @@ static void pxa_unmask_muxed_gpio(unsigned int irq)
GFER(gpio) = GPIO_IRQ_falling_edge[idx] & GPIO_IRQ_mask[idx];
}
static struct irqchip pxa_muxed_gpio_chip = {
static struct irq_chip pxa_muxed_gpio_chip = {
.name = "GPIO",
.ack = pxa_ack_muxed_gpio,
.mask = pxa_mask_muxed_gpio,
.unmask = pxa_unmask_muxed_gpio,

View File

@ -68,7 +68,8 @@ static void lpd270_unmask_irq(unsigned int irq)
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
static struct irqchip lpd270_irq_chip = {
static struct irq_chip lpd270_irq_chip = {
.name = "CPLD",
.ack = lpd270_mask_irq,
.mask = lpd270_mask_irq,
.unmask = lpd270_unmask_irq,

View File

@ -78,7 +78,8 @@ static void lubbock_unmask_irq(unsigned int irq)
LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
}
static struct irqchip lubbock_irq_chip = {
static struct irq_chip lubbock_irq_chip = {
.name = "FPGA",
.ack = lubbock_mask_irq,
.mask = lubbock_mask_irq,
.unmask = lubbock_unmask_irq,

View File

@ -64,7 +64,8 @@ static void mainstone_unmask_irq(unsigned int irq)
MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
}
static struct irqchip mainstone_irq_chip = {
static struct irq_chip mainstone_irq_chip = {
.name = "FPGA",
.ack = mainstone_mask_irq,
.mask = mainstone_mask_irq,
.unmask = mainstone_unmask_irq,

View File

@ -10,45 +10,47 @@ obj-m :=
obj-n :=
obj- :=
# DMA
obj-$(CONFIG_S3C2410_DMA) += dma.o
# S3C2400 support files
obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
# S3C2410 support files
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
obj-$(CONFIG_S3C2410_DMA) += dma.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
# Power Management support
obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
# S3C2412 support
obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o
#
# S3C244X support
obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
# Clock control
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
# S3C2440 support
obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
# S3C2442 support
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o
# bast extras

View File

@ -112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs)
}
static void
dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
struct s3c2410_dma_regstate *regs)
{
printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
@ -132,7 +132,16 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan)
chan->number, fname, line, chan->load_state,
chan->curr, chan->next, chan->end);
dmadbg_showregs(fname, line, chan, &state);
dmadbg_dumpregs(fname, line, chan, &state);
}
static void
dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan)
{
struct s3c2410_dma_regstate state;
dmadbg_capture(chan, &state);
dmadbg_dumpregs(fname, line, chan, &state);
}
#define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan))
@ -253,10 +262,14 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan,
buf->next);
reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
} else {
pr_debug("load_state is %d => autoreload\n", chan->load_state);
//pr_debug("load_state is %d => autoreload\n", chan->load_state);
reload = S3C2410_DCON_AUTORELOAD;
}
if ((buf->data & 0xf0000000) != 0x30000000) {
dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
}
writel(buf->data, chan->addr_reg);
dma_wrreg(chan, S3C2410_DMA_DCON,
@ -370,7 +383,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
tmp |= S3C2410_DMASKTRIG_ON;
dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
pr_debug("wrote %08lx to DMASKTRIG\n", tmp);
pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
#if 0
/* the dma buffer loads should take care of clearing the AUTO
@ -384,7 +397,30 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
dbg_showchan(chan);
/* if we've only loaded one buffer onto the channel, then check
* to see if we have another, and if so, try and load it so when
* the first buffer is finished, the new one will be loaded onto
* the channel */
if (chan->next != NULL) {
if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
pr_debug("%s: buff not yet loaded, no more todo\n",
__FUNCTION__);
} else {
chan->load_state = S3C2410_DMALOAD_1RUNNING;
s3c2410_dma_loadbuffer(chan, chan->next);
}
} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
s3c2410_dma_loadbuffer(chan, chan->next);
}
}
local_irq_restore(flags);
return 0;
}
@ -436,12 +472,11 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
if (buf == NULL) {
pr_debug("%s: out of memory (%ld alloc)\n",
__FUNCTION__, sizeof(*buf));
__FUNCTION__, (long)sizeof(*buf));
return -ENOMEM;
}
pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
//pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
//dbg_showchan(chan);
buf->next = NULL;
@ -537,14 +572,20 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan)
case S3C2410_DMALOAD_1LOADED:
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
/* flag error? */
printk(KERN_ERR "dma%d: timeout waiting for load\n",
chan->number);
printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
chan->number, __FUNCTION__);
return;
}
break;
case S3C2410_DMALOAD_1LOADED_1RUNNING:
/* I believe in this case we do not have anything to do
* until the next buffer comes along, and we turn off the
* reload */
return;
default:
pr_debug("dma%d: lastxfer: unhandled load_state %d with no next",
pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
chan->number, chan->load_state);
return;
@ -629,7 +670,14 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
} else {
}
if (chan->next != NULL) {
/* only reload if the channel is still running... our buffer done
* routine may have altered the state by requesting the dma channel
* to stop or shutdown... */
/* todo: check that when the channel is shut-down from inside this
* function, we cope with unsetting reload, etc */
if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
unsigned long flags;
switch (chan->load_state) {
@ -644,8 +692,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
case S3C2410_DMALOAD_1LOADED:
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
/* flag error? */
printk(KERN_ERR "dma%d: timeout waiting for load\n",
chan->number);
printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
chan->number, __FUNCTION__);
return IRQ_HANDLED;
}
@ -678,8 +726,6 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
return IRQ_HANDLED;
}
/* s3c2410_request_dma
*
* get control of an dma channel
@ -718,11 +764,17 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
pr_debug("dma%d: %s : requesting irq %d\n",
channel, __FUNCTION__, chan->irq);
chan->irq_claimed = 1;
local_irq_restore(flags);
err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
client->name, (void *)chan);
local_irq_save(flags);
if (err) {
chan->in_use = 0;
chan->irq_claimed = 0;
local_irq_restore(flags);
printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
@ -730,7 +782,6 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
return err;
}
chan->irq_claimed = 1;
chan->irq_enabled = 1;
}
@ -810,6 +861,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
tmp |= S3C2410_DMASKTRIG_STOP;
//tmp &= ~S3C2410_DMASKTRIG_ON;
dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
#if 0
@ -819,6 +871,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
#endif
/* should stop do this, or should we wait for flush? */
chan->state = S3C2410_DMA_IDLE;
chan->load_state = S3C2410_DMALOAD_NONE;
@ -827,6 +880,22 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
return 0;
}
void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan)
{
unsigned long tmp;
unsigned int timeout = 0x10000;
while (timeout-- > 0) {
tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
if (!(tmp & S3C2410_DMASKTRIG_ON))
return;
}
pr_debug("dma%d: failed to stop?\n", chan->number);
}
/* s3c2410_dma_flush
*
* stop the channel, and remove all current and pending transfers
@ -837,7 +906,9 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
s3c2410_dma_buf_t *buf, *next;
unsigned long flags;
pr_debug("%s:\n", __FUNCTION__);
pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
dbg_showchan(chan);
local_irq_save(flags);
@ -864,11 +935,64 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
}
}
dbg_showregs(chan);
s3c2410_dma_waitforstop(chan);
#if 0
/* should also clear interrupts, according to WinCE BSP */
{
unsigned long tmp;
tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
tmp |= S3C2410_DCON_NORELOAD;
dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
}
#endif
dbg_showregs(chan);
local_irq_restore(flags);
return 0;
}
int
s3c2410_dma_started(s3c2410_dma_chan_t *chan)
{
unsigned long flags;
local_irq_save(flags);
dbg_showchan(chan);
/* if we've only loaded one buffer onto the channel, then check
* to see if we have another, and if so, try and load it so when
* the first buffer is finished, the new one will be loaded onto
* the channel */
if (chan->next != NULL) {
if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
pr_debug("%s: buff not yet loaded, no more todo\n",
__FUNCTION__);
} else {
chan->load_state = S3C2410_DMALOAD_1RUNNING;
s3c2410_dma_loadbuffer(chan, chan->next);
}
} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
s3c2410_dma_loadbuffer(chan, chan->next);
}
}
local_irq_restore(flags);
return 0;
}
int
s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
@ -885,14 +1009,15 @@ s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
return s3c2410_dma_dostop(chan);
case S3C2410_DMAOP_PAUSE:
return -ENOENT;
case S3C2410_DMAOP_RESUME:
return -ENOENT;
case S3C2410_DMAOP_FLUSH:
return s3c2410_dma_flush(chan);
case S3C2410_DMAOP_STARTED:
return s3c2410_dma_started(chan);
case S3C2410_DMAOP_TIMEOUT:
return 0;

View File

@ -60,11 +60,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
.virtual = (u32)S3C24XX_VA_ISA_BYTE,
.pfn = __phys_to_pfn(0x0),
.length = SZ_4M,
.type = MT_DEVICE
.type = MT_DEVICE,
}, {
.virtual = (u32)S3C24XX_VA_ISA_WORD,
.pfn = __phys_to_pfn(0x0),
.length = SZ_4M, MT_DEVICE
.length = SZ_4M,
.type = MT_DEVICE,
},
/* we could possibly compress the next set down into a set of smaller tables
@ -78,36 +79,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
.virtual = (u32)ANUBIS_VA_CTRL1,
.pfn = __phys_to_pfn(ANUBIS_PA_CTRL1),
.length = SZ_4K,
.type = MT_DEVICE
.type = MT_DEVICE,
}, {
.virtual = (u32)ANUBIS_VA_CTRL2,
.pfn = __phys_to_pfn(ANUBIS_PA_CTRL2),
.length = SZ_4K,
.type =MT_DEVICE
},
/* IDE drives */
{
.virtual = (u32)ANUBIS_IDEPRI,
.pfn = __phys_to_pfn(S3C2410_CS3),
.length = SZ_1M,
.type = MT_DEVICE
}, {
.virtual = (u32)ANUBIS_IDEPRIAUX,
.pfn = __phys_to_pfn(S3C2410_CS3+(1<<26)),
.length = SZ_1M,
.type = MT_DEVICE
}, {
.virtual = (u32)ANUBIS_IDESEC,
.pfn = __phys_to_pfn(S3C2410_CS4),
.length = SZ_1M,
.type = MT_DEVICE
}, {
.virtual = (u32)ANUBIS_IDESECAUX,
.pfn = __phys_to_pfn(S3C2410_CS4+(1<<26)),
.length = SZ_1M,
.type = MT_DEVICE
.type = MT_DEVICE,
},
};
@ -126,7 +103,7 @@ static struct s3c24xx_uart_clksrc anubis_serial_clocks[] = {
.name = "pclk",
.divisor = 1,
.min_baud = 0,
.max_baud = 0.
.max_baud = 0,
}
};
@ -139,7 +116,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = anubis_serial_clocks,
.clocks_size = ARRAY_SIZE(anubis_serial_clocks)
.clocks_size = ARRAY_SIZE(anubis_serial_clocks),
},
[1] = {
.hwport = 2,
@ -148,7 +125,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = anubis_serial_clocks,
.clocks_size = ARRAY_SIZE(anubis_serial_clocks)
.clocks_size = ARRAY_SIZE(anubis_serial_clocks),
},
};
@ -162,7 +139,7 @@ static struct mtd_partition anubis_default_nand_part[] = {
[0] = {
.name = "Boot Agent",
.size = SZ_16K,
.offset = 0
.offset = 0,
},
[1] = {
.name = "/boot",
@ -194,21 +171,21 @@ static struct s3c2410_nand_set anubis_nand_sets[] = {
.nr_chips = 1,
.nr_map = external_map,
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
.partitions = anubis_default_nand_part
.partitions = anubis_default_nand_part,
},
[0] = {
.name = "chip0",
.nr_chips = 1,
.nr_map = chip0_map,
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
.partitions = anubis_default_nand_part
.partitions = anubis_default_nand_part,
},
[2] = {
.name = "chip1",
.nr_chips = 1,
.nr_map = chip1_map,
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
.partitions = anubis_default_nand_part
.partitions = anubis_default_nand_part,
},
};
@ -313,7 +290,7 @@ static struct s3c24xx_board anubis_board __initdata = {
.devices = anubis_devices,
.devices_count = ARRAY_SIZE(anubis_devices),
.clocks = anubis_clocks,
.clocks_count = ARRAY_SIZE(anubis_clocks)
.clocks_count = ARRAY_SIZE(anubis_clocks),
};
static void __init anubis_map_io(void)

View File

@ -67,12 +67,12 @@ static struct map_desc osiris_iodesc[] __initdata = {
.virtual = (u32)OSIRIS_VA_CTRL1,
.pfn = __phys_to_pfn(OSIRIS_PA_CTRL1),
.length = SZ_16K,
.type = MT_DEVICE
.type = MT_DEVICE,
}, {
.virtual = (u32)OSIRIS_VA_CTRL2,
.pfn = __phys_to_pfn(OSIRIS_PA_CTRL2),
.length = SZ_16K,
.type = MT_DEVICE
.type = MT_DEVICE,
},
};
@ -91,7 +91,7 @@ static struct s3c24xx_uart_clksrc osiris_serial_clocks[] = {
.name = "pclk",
.divisor = 1,
.min_baud = 0,
.max_baud = 0.
.max_baud = 0,
}
};
@ -103,7 +103,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = osiris_serial_clocks,
.clocks_size = ARRAY_SIZE(osiris_serial_clocks)
.clocks_size = ARRAY_SIZE(osiris_serial_clocks),
},
[1] = {
.hwport = 1,
@ -112,7 +112,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
.ulcon = ULCON,
.ufcon = UFCON,
.clocks = osiris_serial_clocks,
.clocks_size = ARRAY_SIZE(osiris_serial_clocks)
.clocks_size = ARRAY_SIZE(osiris_serial_clocks),
},
};
@ -126,7 +126,7 @@ static struct mtd_partition osiris_default_nand_part[] = {
[0] = {
.name = "Boot Agent",
.size = SZ_16K,
.offset = 0
.offset = 0,
},
[1] = {
.name = "/boot",
@ -158,21 +158,21 @@ static struct s3c2410_nand_set osiris_nand_sets[] = {
.nr_chips = 1,
.nr_map = external_map,
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
.partitions = osiris_default_nand_part
.partitions = osiris_default_nand_part,
},
[0] = {
.name = "chip0",
.nr_chips = 1,
.nr_map = chip0_map,
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
.partitions = osiris_default_nand_part
.partitions = osiris_default_nand_part,
},
[2] = {
.name = "chip1",
.nr_chips = 1,
.nr_map = chip1_map,
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
.partitions = osiris_default_nand_part
.partitions = osiris_default_nand_part,
},
};
@ -245,7 +245,7 @@ static struct s3c24xx_board osiris_board __initdata = {
.devices = osiris_devices,
.devices_count = ARRAY_SIZE(osiris_devices),
.clocks = osiris_clocks,
.clocks_count = ARRAY_SIZE(osiris_clocks)
.clocks_count = ARRAY_SIZE(osiris_clocks),
};
static void __init osiris_map_io(void)

View File

@ -95,7 +95,8 @@ static int sa1100_low_gpio_wake(unsigned int irq, unsigned int on)
return 0;
}
static struct irqchip sa1100_low_gpio_chip = {
static struct irq_chip sa1100_low_gpio_chip = {
.name = "GPIO-l",
.ack = sa1100_low_gpio_ack,
.mask = sa1100_low_gpio_mask,
.unmask = sa1100_low_gpio_unmask,
@ -178,7 +179,8 @@ static int sa1100_high_gpio_wake(unsigned int irq, unsigned int on)
return 0;
}
static struct irqchip sa1100_high_gpio_chip = {
static struct irq_chip sa1100_high_gpio_chip = {
.name = "GPIO-h",
.ack = sa1100_high_gpio_ack,
.mask = sa1100_high_gpio_mask,
.unmask = sa1100_high_gpio_unmask,
@ -215,7 +217,8 @@ static int sa1100_set_wake(unsigned int irq, unsigned int on)
return -EINVAL;
}
static struct irqchip sa1100_normal_chip = {
static struct irq_chip sa1100_normal_chip = {
.name = "SC",
.ack = sa1100_mask_irq,
.mask = sa1100_mask_irq,
.unmask = sa1100_unmask_irq,

View File

@ -69,7 +69,8 @@ static irqreturn_t bogus_int(int irq, void *dev_id, struct pt_regs *regs)
static struct irqaction cascade;
static struct irqchip fb_chip = {
static struct irq_chip fb_chip = {
.name = "XT-PIC",
.ack = shark_ack_8259A_irq,
.mask = shark_disable_8259A_irq,
.unmask = shark_enable_8259A_irq,

View File

@ -69,7 +69,8 @@ static void sic_unmask_irq(unsigned int irq)
writel(1 << irq, VA_SIC_BASE + SIC_IRQ_ENABLE_SET);
}
static struct irqchip sic_chip = {
static struct irq_chip sic_chip = {
.name = "SIC",
.ack = sic_mask_irq,
.mask = sic_mask_irq,
.unmask = sic_unmask_irq,
@ -284,7 +285,7 @@ static struct flash_platform_data versatile_flash_data = {
static struct resource versatile_flash_resource = {
.start = VERSATILE_FLASH_BASE,
.end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE,
.end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};

View File

@ -363,7 +363,9 @@ EXPORT_SYMBOL(__ioremap);
void __iounmap(void __iomem *addr)
{
#ifndef CONFIG_SMP
struct vm_struct **p, *tmp;
#endif
unsigned int section_mapping = 0;
addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);

View File

@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
@ -30,6 +31,13 @@ EXPORT_SYMBOL(__cpuc_coherent_kern_range);
EXPORT_SYMBOL(cpu_cache);
#endif
#ifndef MULTI_USER
EXPORT_SYMBOL(__cpu_clear_user_page);
EXPORT_SYMBOL(__cpu_copy_user_page);
#else
EXPORT_SYMBOL(cpu_user);
#endif
/*
* No module should need to touch the TLB (and currently
* no modules do. We export this for "loadkernel" support

View File

@ -536,6 +536,11 @@ cpu_80200_name:
.asciz "XScale-80200"
.size cpu_80200_name, . - cpu_80200_name
.type cpu_80219_name, #object
cpu_80219_name:
.asciz "XScale-80219"
.size cpu_80219_name, . - cpu_80219_name
.type cpu_8032x_name, #object
cpu_8032x_name:
.asciz "XScale-IOP8032x Family"
@ -613,10 +618,33 @@ __80200_proc_info:
.long xscale_cache_fns
.size __80200_proc_info, . - __80200_proc_info
.type __80219_proc_info,#object
__80219_proc_info:
.long 0x69052e20
.long 0xffffffe0
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
.long cpu_80219_name
.long xscale_processor_functions
.long v4wbi_tlb_fns
.long xscale_mc_user_fns
.long xscale_cache_fns
.size __80219_proc_info, . - __80219_proc_info
.type __8032x_proc_info,#object
__8032x_proc_info:
.long 0x69052420
.long 0xfffff5e0 @ mask should accommodate IOP80219 also
.long 0xffffffe0
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \

View File

@ -944,7 +944,8 @@ static void mpuio_unmask_irq(unsigned int irq)
_set_gpio_irqenable(bank, gpio, 1);
}
static struct irqchip gpio_irq_chip = {
static struct irq_chip gpio_irq_chip = {
.name = "GPIO",
.ack = gpio_ack_irq,
.mask = gpio_mask_irq,
.unmask = gpio_unmask_irq,
@ -952,10 +953,11 @@ static struct irqchip gpio_irq_chip = {
.set_wake = gpio_wake_enable,
};
static struct irqchip mpuio_irq_chip = {
static struct irq_chip mpuio_irq_chip = {
.name = "MPUIO",
.ack = mpuio_ack_irq,
.mask = mpuio_mask_irq,
.unmask = mpuio_unmask_irq
.unmask = mpuio_unmask_irq
};
static int initialized;

View File

@ -142,6 +142,7 @@ config X86_SUMMIT
In particular, it is needed for the x440.
If you don't have one of these computers, you should say N here.
If you want to build a NUMA kernel, you must select ACPI.
config X86_BIGSMP
bool "Support for other sub-arch SMP systems with more than 8 CPUs"
@ -169,6 +170,7 @@ config X86_GENERICARCH
help
This option compiles in the Summit, bigsmp, ES7000, default subarchitectures.
It is intended for a generic binary kernel.
If you want a NUMA kernel, select ACPI. We need SRAT for NUMA.
config X86_ES7000
bool "Support for Unisys ES7000 IA32 series"
@ -542,7 +544,7 @@ config X86_PAE
# Common NUMA Features
config NUMA
bool "Numa Memory Allocation and Scheduler Support"
depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
depends on SMP && HIGHMEM64G && (X86_NUMAQ || (X86_SUMMIT || X86_GENERICARCH) && ACPI)
default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT)
@ -672,7 +674,7 @@ config MTRR
See <file:Documentation/mtrr.txt> for more information.
config EFI
bool "Boot from EFI support (EXPERIMENTAL)"
bool "Boot from EFI support"
depends on ACPI
default n
---help---

View File

@ -59,7 +59,8 @@ quiet_cmd_syscall = SYSCALL $@
export CPPFLAGS_vsyscall.lds += -P -C -U$(ARCH)
vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
$(call ld-option, -Wl$(comma)--hash-style=sysv)
SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)

View File

@ -59,7 +59,7 @@ static inline int gsi_irq_sharing(int gsi) { return gsi; }
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length != sizeof(*entry))
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "

View File

@ -292,7 +292,10 @@ ENTRY(do_suspend_lowlevel)
pushl $3
call acpi_enter_sleep_state
addl $4, %esp
ret
# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover
jmp ret_point
.p2align 4,,7
ret_point:
call restore_registers

View File

@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
depends on PCI
help
This adds the CPUFreq driver for NatSemi Geode processors which
support suspend modulation.
@ -202,7 +203,7 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
select CPU_FREQ_TABLE
depends on BROKEN
depends on ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T

View File

@ -384,8 +384,7 @@ static int acpi_cpufreq_early_init_acpi(void)
}
/* Do initialization in ACPI core */
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
return acpi_processor_preregister_performance(acpi_perf_data);
}
static int
@ -568,16 +567,11 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
static int __init
acpi_cpufreq_init (void)
{
int result = 0;
dprintk("acpi_cpufreq_init\n");
result = acpi_cpufreq_early_init_acpi();
acpi_cpufreq_early_init_acpi();
if (!result)
result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
return cpufreq_register_driver(&acpi_cpufreq_driver);
}

View File

@ -29,11 +29,13 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/acpi.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include "longhaul.h"
@ -56,6 +58,8 @@ static int minvid, maxvid;
static unsigned int minmult, maxmult;
static int can_scale_voltage;
static int vrmrev;
static struct acpi_processor *pr = NULL;
static struct acpi_processor_cx *cx = NULL;
/* Module parameters */
static int dont_scale_voltage;
@ -118,84 +122,65 @@ static int longhaul_get_cpu_mult(void)
return eblcr_table[invalue];
}
/* For processor with BCR2 MSR */
static void do_powersaver(union msr_longhaul *longhaul,
unsigned int clock_ratio_index)
static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
{
struct pci_dev *dev;
unsigned long flags;
unsigned int tmp_mask;
int version;
int i;
u16 pci_cmd;
u16 cmd_state[64];
union msr_bcr2 bcr2;
u32 t;
switch (cpu_model) {
case CPU_EZRA_T:
version = 3;
break;
case CPU_NEHEMIAH:
version = 0xf;
break;
default:
return;
}
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
longhaul->bits.EnableSoftBusRatio = 1;
longhaul->bits.RevisionKey = 0;
preempt_disable();
local_irq_save(flags);
/*
* get current pci bus master state for all devices
* and clear bus master bit
*/
dev = NULL;
i = 0;
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (dev != NULL) {
pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
cmd_state[i++] = pci_cmd;
pci_cmd &= ~PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
}
} while (dev != NULL);
tmp_mask=inb(0x21); /* works on C3. save mask. */
outb(0xFE,0x21); /* TMR0 only */
outb(0xFF,0x80); /* delay */
rdmsrl(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = clock_ratio_index;
/* Sync to timer tick */
safe_halt();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
halt();
ACPI_FLUSH_CPU_CACHE();
/* Change frequency on next halt or sleep */
wrmsrl(MSR_VIA_BCR2, bcr2.val);
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
t = inl(acpi_fadt.xpm_tmr_blk.address);
/* Disable software clock multiplier */
local_irq_disable();
rdmsrl(MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
wrmsrl(MSR_VIA_BCR2, bcr2.val);
}
outb(tmp_mask,0x21); /* restore mask */
/* For processor with Longhaul MSR */
/* restore pci bus master state for all devices */
dev = NULL;
i = 0;
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (dev != NULL) {
pci_cmd = cmd_state[i++];
pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
}
} while (dev != NULL);
local_irq_restore(flags);
preempt_enable();
static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
{
union msr_longhaul longhaul;
u32 t;
/* disable bus ratio bit */
rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
longhaul->bits.EnableSoftBusRatio = 0;
longhaul->bits.RevisionKey = version;
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
longhaul.bits.EnableSoftBusRatio = 1;
/* Sync to timer tick */
safe_halt();
ACPI_FLUSH_CPU_CACHE();
/* Change frequency on next halt or sleep */
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Invoke C3 */
inb(cx_address);
/* Dummy op - must do something useless after P_LVL3 read */
t = inl(acpi_fadt.xpm_tmr_blk.address);
/* Disable bus ratio bit */
local_irq_disable();
longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
longhaul.bits.EnableSoftBusRatio = 0;
longhaul.bits.EnableSoftBSEL = 0;
longhaul.bits.EnableSoftVID = 0;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
}
/**
@ -209,9 +194,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
{
int speed, mult;
struct cpufreq_freqs freqs;
union msr_longhaul longhaul;
union msr_bcr2 bcr2;
static unsigned int old_ratio=-1;
unsigned long flags;
unsigned int pic1_mask, pic2_mask;
if (old_ratio == clock_ratio_index)
return;
@ -234,6 +219,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
preempt_disable();
local_irq_save(flags);
pic2_mask = inb(0xA1);
pic1_mask = inb(0x21); /* works on C3. save mask. */
outb(0xFF,0xA1); /* Overkill */
outb(0xFE,0x21); /* TMR0 only */
/* Disable bus master arbitration */
if (pr->flags.bm_check) {
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
ACPI_MTX_DO_NOT_LOCK);
}
switch (longhaul_version) {
/*
@ -245,20 +244,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
*/
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
rdmsrl (MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
bcr2.bits.CLOCKMUL = clock_ratio_index;
local_irq_disable();
wrmsrl (MSR_VIA_BCR2, bcr2.val);
safe_halt();
/* Disable software clock multiplier */
rdmsrl (MSR_VIA_BCR2, bcr2.val);
bcr2.bits.ESOFTBF = 0;
local_irq_disable();
wrmsrl (MSR_VIA_BCR2, bcr2.val);
local_irq_enable();
do_longhaul1(cx->address, clock_ratio_index);
break;
/*
@ -273,10 +259,22 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
* to work in practice.
*/
case TYPE_POWERSAVER:
do_powersaver(&longhaul, clock_ratio_index);
do_powersaver(cx->address, clock_ratio_index);
break;
}
/* Enable bus master arbitration */
if (pr->flags.bm_check) {
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
ACPI_MTX_DO_NOT_LOCK);
}
outb(pic2_mask,0xA1); /* restore mask */
outb(pic1_mask,0x21);
local_irq_restore(flags);
preempt_enable();
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
@ -324,9 +322,11 @@ static int guess_fsb(void)
static int __init longhaul_get_ranges(void)
{
unsigned long invalue;
unsigned int multipliers[32]= {
50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
-1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
unsigned int ezra_t_multipliers[32]= {
90, 30, 40, 100, 55, 35, 45, 95,
50, 70, 80, 60, 120, 75, 85, 65,
-1, 110, 120, -1, 135, 115, 125, 105,
130, 150, 160, 140, -1, 155, -1, 145 };
unsigned int j, k = 0;
union msr_longhaul longhaul;
unsigned long lo, hi;
@ -355,13 +355,13 @@ static int __init longhaul_get_ranges(void)
invalue = longhaul.bits.MaxMHzBR;
if (longhaul.bits.MaxMHzBR4)
invalue += 16;
maxmult=multipliers[invalue];
maxmult=ezra_t_multipliers[invalue];
invalue = longhaul.bits.MinMHzBR;
if (longhaul.bits.MinMHzBR4 == 1)
minmult = 30;
else
minmult = multipliers[invalue];
minmult = ezra_t_multipliers[invalue];
fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
break;
}
@ -527,6 +527,18 @@ static unsigned int longhaul_get(unsigned int cpu)
return calc_speed(longhaul_get_cpu_mult());
}
static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
u32 nesting_level,
void *context, void **return_value)
{
struct acpi_device *d;
if ( acpi_bus_get_device(obj_handle, &d) ) {
return 0;
}
*return_value = (void *)acpi_driver_data(d);
return 1;
}
static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
@ -534,6 +546,15 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
char *cpuname=NULL;
int ret;
/* Check ACPI support for C3 state */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
&longhaul_walk_callback, NULL, (void *)&pr);
if (pr == NULL) goto err_acpi;
cx = &pr->power.states[ACPI_STATE_C3];
if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
/* Now check what we have on this motherboard */
switch (c->x86_model) {
case 6:
cpu_model = CPU_SAMUEL;
@ -634,6 +655,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
return 0;
err_acpi:
printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
return -ENODEV;
}
static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@ -666,6 +691,18 @@ static int __init longhaul_init(void)
if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
return -ENODEV;
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (cpu_has_apic) {
printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
return -ENODEV;
}
#endif
switch (c->x86_model) {
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
@ -699,6 +736,6 @@ MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
MODULE_LICENSE ("GPL");
module_init(longhaul_init);
late_initcall(longhaul_init);
module_exit(longhaul_exit);
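
Taken together, the longhaul hunks above move the ratio change into an ACPI C3 entry: interrupts are masked at both PICs, bus-master arbitration is disabled, the new ratio is latched in the BCR2 or Longhaul MSR, and the transition is triggered by reading the processor's C3 port. The outline below is only a condensed restatement of that sequence for readability; it reuses the kernel helpers the patch itself uses and is not buildable on its own.

/* Condensed sketch of the transition path introduced above (kernel context
 * only; cx_address is the ACPI C3 P_LVL3 port discovered at init time). */
static void longhaul_c3_transition_sketch(int cx_address, unsigned int index)
{
	unsigned long flags;
	unsigned int pic1_mask, pic2_mask;

	preempt_disable();
	local_irq_save(flags);

	pic2_mask = inb(0xA1);			/* save and mask both PICs */
	pic1_mask = inb(0x21);
	outb(0xFF, 0xA1);			/* overkill */
	outb(0xFE, 0x21);			/* TMR0 only */

	if (pr->flags.bm_check)			/* stop bus-master arbitration */
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);

	do_powersaver(cx_address, index);	/* or do_longhaul1() on older cores */

	if (pr->flags.bm_check)			/* re-enable arbitration */
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);

	outb(pic2_mask, 0xA1);			/* restore PIC masks */
	outb(pic1_mask, 0x21);

	local_irq_restore(flags);
	preempt_enable();
}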

View File

@ -759,7 +759,7 @@ static int __cpuinit cache_sysfs_init(void)
if (num_cache_leaves == 0)
return 0;
register_cpu_notifier(&cacheinfo_cpu_notifier);
register_hotcpu_notifier(&cacheinfo_cpu_notifier);
for_each_online_cpu(i) {
cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,

View File

@ -9,6 +9,6 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c);
/* Call the installed machine check handler for this CPU setup. */
extern fastcall void (*machine_check_vector)(struct pt_regs *, long error_code);
extern int mce_disabled __initdata;
extern int mce_disabled;
extern int nr_mce_banks;

View File

@ -204,7 +204,7 @@ VM_MASK = 0x00020000
ENTRY(ret_from_fork)
CFI_STARTPROC
pushl %eax
CFI_ADJUST_CFA_OFFSET -4
CFI_ADJUST_CFA_OFFSET 4
call schedule_tail
GET_THREAD_INFO(%ebp)
popl %eax

View File

@ -256,11 +256,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
int ret = 0;
kprobe_opcode_t *addr;
struct kprobe_ctlblk *kcb;
#ifdef CONFIG_PREEMPT
unsigned pre_preempt_count = preempt_count();
#else
unsigned pre_preempt_count = 1;
#endif
addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
@ -338,13 +333,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
return 1;
ss_probe:
if (pre_preempt_count && p->ainsn.boostable == 1 && !p->post_handler){
#ifndef CONFIG_PREEMPT
if (p->ainsn.boostable == 1 && !p->post_handler){
/* Boost up -- we can execute copied instructions directly */
reset_current_kprobe();
regs->eip = (unsigned long)p->ainsn.insn;
preempt_enable_no_resched();
return 1;
}
#endif
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;

View File

@ -189,14 +189,11 @@ NORET_TYPE void machine_kexec(struct kimage *image)
memcpy((void *)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
/* The segment registers are funny things, they are
* automatically loaded from a table, in memory wherever you
* set them to a specific selector, but this table is never
* accessed again you set the segment to a different selector.
*
* The more common model is are caches where the behide
* the scenes work is done, but is also dropped at arbitrary
* times.
/* The segment registers are funny things, they have both a
* visible and an invisible part. Whenever the visible part is
* set to a specific selector, the invisible part is loaded
* from a table in memory. At no other time is the
* descriptor table in memory accessed.
*
* I take advantage of this here by force loading the
* segments, before I zap the gdt with an invalid value.
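
The rewritten comment describes the split between a segment register's visible selector and its hidden descriptor cache. A tiny userspace illustration of the same mechanism, assuming x86 and GNU C inline asm: it reads the visible selector in %ds and writes the same value straight back, which is harmless.

#include <stdio.h>

int main(void)
{
	unsigned int sel;

	/* Read the visible part of %ds (the 16-bit selector). */
	asm volatile("mov %%ds, %0" : "=r" (sel));

	/* Loading a segment register is the only time the CPU consults the
	 * descriptor table; writing back the selector we already hold changes
	 * nothing for program behaviour. */
	asm volatile("mov %0, %%ds" : : "r" (sel));

	printf("ds selector: %#x\n", sel);
	return 0;
}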

View File

@ -575,6 +575,7 @@ void touch_nmi_watchdog (void)
*/
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
extern void die_nmi(struct pt_regs *, const char *msg);

View File

@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
/*
* Now maybe handle debug registers and/or IO bitmaps
*/
if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
|| test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
|| test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
__switch_to_xtra(next_p, tss);
disable_tsc(prev_p, next_p);

View File

@ -212,14 +212,20 @@ valid_k7:
* then we print a warning if not, and always resync.
*/
static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];
static struct {
atomic_t start_flag;
atomic_t count_start;
atomic_t count_stop;
unsigned long long values[NR_CPUS];
} tsc __initdata = {
.start_flag = ATOMIC_INIT(0),
.count_start = ATOMIC_INIT(0),
.count_stop = ATOMIC_INIT(0),
};
#define NR_LOOPS 5
static void __init synchronize_tsc_bp (void)
static void __init synchronize_tsc_bp(void)
{
int i;
unsigned long long t0;
@ -233,7 +239,7 @@ static void __init synchronize_tsc_bp (void)
/* convert from kcyc/sec to cyc/usec */
one_usec = cpu_khz / 1000;
atomic_set(&tsc_start_flag, 1);
atomic_set(&tsc.start_flag, 1);
wmb();
/*
@ -250,16 +256,16 @@ static void __init synchronize_tsc_bp (void)
/*
* all APs synchronize but they loop on '== num_cpus'
*/
while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
while (atomic_read(&tsc.count_start) != num_booting_cpus()-1)
cpu_relax();
atomic_set(&tsc_count_stop, 0);
atomic_set(&tsc.count_stop, 0);
wmb();
/*
* this lets the APs save their current TSC:
*/
atomic_inc(&tsc_count_start);
atomic_inc(&tsc.count_start);
rdtscll(tsc_values[smp_processor_id()]);
rdtscll(tsc.values[smp_processor_id()]);
/*
* We clear the TSC in the last loop:
*/
@ -269,56 +275,54 @@ static void __init synchronize_tsc_bp (void)
/*
* Wait for all APs to leave the synchronization point:
*/
while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
while (atomic_read(&tsc.count_stop) != num_booting_cpus()-1)
cpu_relax();
atomic_set(&tsc_count_start, 0);
atomic_set(&tsc.count_start, 0);
wmb();
atomic_inc(&tsc_count_stop);
atomic_inc(&tsc.count_stop);
}
sum = 0;
for (i = 0; i < NR_CPUS; i++) {
if (cpu_isset(i, cpu_callout_map)) {
t0 = tsc_values[i];
t0 = tsc.values[i];
sum += t0;
}
}
avg = sum;
do_div(avg, num_booting_cpus());
sum = 0;
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
delta = tsc_values[i] - avg;
delta = tsc.values[i] - avg;
if (delta < 0)
delta = -delta;
/*
* We report bigger than 2 microseconds clock differences.
*/
if (delta > 2*one_usec) {
long realdelta;
long long realdelta;
if (!buggy) {
buggy = 1;
printk("\n");
}
realdelta = delta;
do_div(realdelta, one_usec);
if (tsc_values[i] < avg)
if (tsc.values[i] < avg)
realdelta = -realdelta;
if (realdelta > 0)
printk(KERN_INFO "CPU#%d had %ld usecs TSC "
if (realdelta)
printk(KERN_INFO "CPU#%d had %Ld usecs TSC "
"skew, fixed it up.\n", i, realdelta);
}
sum += delta;
}
if (!buggy)
printk("passed.\n");
}
static void __init synchronize_tsc_ap (void)
static void __init synchronize_tsc_ap(void)
{
int i;
@ -327,20 +331,20 @@ static void __init synchronize_tsc_ap (void)
* this gets called, so we first wait for the BP to
* finish SMP initialization:
*/
while (!atomic_read(&tsc_start_flag))
while (!atomic_read(&tsc.start_flag))
cpu_relax();
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&tsc_count_start);
while (atomic_read(&tsc_count_start) != num_booting_cpus())
atomic_inc(&tsc.count_start);
while (atomic_read(&tsc.count_start) != num_booting_cpus())
cpu_relax();
rdtscll(tsc_values[smp_processor_id()]);
rdtscll(tsc.values[smp_processor_id()]);
if (i == NR_LOOPS-1)
write_tsc(0, 0);
atomic_inc(&tsc_count_stop);
while (atomic_read(&tsc_count_stop) != num_booting_cpus())
atomic_inc(&tsc.count_stop);
while (atomic_read(&tsc.count_stop) != num_booting_cpus())
cpu_relax();
}
}
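
For reference, the skew check above works in TSC cycles: one_usec = cpu_khz / 1000 is the number of cycles per microsecond, so a delta is only reported once it exceeds 2 * one_usec, and realdelta converts it back to microseconds. A small standalone version of that arithmetic, with made-up figures in place of real measurements:

#include <stdio.h>

int main(void)
{
	/* Hypothetical figures, not taken from real hardware. */
	unsigned long cpu_khz = 2400000;		/* 2.4 GHz */
	unsigned long long avg = 1000000000ULL;		/* average TSC value */
	unsigned long long cpu_tsc = 1000012000ULL;	/* this CPU's TSC value */

	unsigned long one_usec = cpu_khz / 1000;	/* cycles per microsecond */
	long long delta = (long long)(cpu_tsc - avg);

	if (delta < 0)
		delta = -delta;

	if (delta > 2 * (long long)one_usec)
		printf("skew of %lld cycles (~%lld usecs) would be reported\n",
		       delta, delta / (long long)one_usec);
	else
		printf("skew of %lld cycles is below the 2 usec threshold\n", delta);
	return 0;
}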

View File

@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
if (in_lock_functions(pc))
if (!user_mode_vm(regs) && in_lock_functions(pc))
return *(unsigned long *)(regs->ebp + 4);
return pc;

View File

@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (unwind_init_blocked(&info, task) == 0)
unw_ret = show_trace_unwind(&info, log_lvl);
}
if (unw_ret > 0) {
if (call_trace > 0)
if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
#ifdef CONFIG_STACK_UNWIND
print_symbol("DWARF2 unwinder stuck at %s\n",
UNW_PC(&info));
if (call_trace == 1) {
printk("Leftover inexact backtrace:\n");
if (UNW_SP(&info))
stack = (void *)UNW_SP(&info);
} else if (call_trace > 1)
return;
printk("%sLegacy call trace:\n", log_lvl);
else
printk("Full inexact backtrace again:\n");
#else
printk("Inexact backtrace:\n");
#endif
}
}
@ -442,11 +453,9 @@ void die(const char * str, struct pt_regs * regs, long err)
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
if (panic_on_oops)
panic("Fatal exception");
}
oops_exit();
do_exit(SIGSEGV);
}
@ -1238,8 +1247,10 @@ static int __init call_trace_setup(char *s)
call_trace = -1;
else if (strcmp(s, "both") == 0)
call_trace = 0;
else if (strcmp(s, "new") == 0)
else if (strcmp(s, "newfallback") == 0)
call_trace = 1;
else if (strcmp(s, "new") == 2)
call_trace = 2;
return 1;
}
__setup("call_trace=", call_trace_setup);

View File

@ -10,6 +10,7 @@ SECTIONS
. = VDSO_PRELINK + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }

View File

@ -14,8 +14,12 @@ static __init int pci_access_init(void)
#ifdef CONFIG_PCI_BIOS
pci_pcbios_init();
#endif
if (raw_pci_ops)
return 0;
/*
* don't check for raw_pci_ops here because we want pcbios as last
* fallback, yet it's needed to run first to set pcibios_last_bus
* in case legacy PCI probing is used. Otherwise detecting peer busses
* fails.
*/
#ifdef CONFIG_PCI_DIRECT
pci_direct_init();
#endif
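
The comment above explains why pci_access_init() no longer bails out after pcbios probing: pcbios must run first so pcibios_last_bus gets set for legacy probing, yet the direct method is still preferred, with any success check deferred until every method has had its turn. A generic, hedged sketch of that "probe everything, decide at the end" pattern, using stub probes rather than the real kernel functions:

#include <stdio.h>
#include <stdbool.h>

/* Stub probes standing in for pci_pcbios_init()/pci_direct_init(); each may
 * or may not install an access method. */
static bool have_ops;

static void probe_bios(void)   { /* always runs first, purely for its side effects */ }
static void probe_direct(void) { have_ops = true; /* pretend this one succeeds */ }

static int access_init(void)
{
	probe_bios();		/* run even though a later method may win */
	probe_direct();		/* preferred method, may override */

	/* Only now decide whether any method succeeded. */
	if (!have_ops) {
		fprintf(stderr, "no PCI access method found\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	return access_init() ? 1 : 0;
}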

Some files were not shown because too many files have changed in this diff.