Merge branch 'master' into upstream

This commit is contained in:
Jeff Garzik 2006-08-29 17:20:55 -04:00
commit a422142cfd
223 changed files with 5831 additions and 3373 deletions

View file

@@ -2209,7 +2209,7 @@ S: (address available on request)
S: USA
N: Ian McDonald
E: iam4@cs.waikato.ac.nz
E: ian.mcdonald@jandi.co.nz
E: imcdnzl@gmail.com
W: http://wand.net.nz/~iam4
W: http://imcdnzl.blogspot.com

View file

@@ -0,0 +1,206 @@
/*
* ucon.c
*
* Copyright (c) 2004+ Evgeniy Polyakov <johnpol@2ka.mipt.ru>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <linux/connector.h>
#define DEBUG
#define NETLINK_CONNECTOR 11
#ifdef DEBUG
#define ulog(f, a...) fprintf(stdout, f, ##a)
#else
#define ulog(f, a...) do {} while (0)
#endif
static int need_exit;
static __u32 seq;
static int netlink_send(int s, struct cn_msg *msg)
{
struct nlmsghdr *nlh;
unsigned int size;
int err;
char buf[128];
struct cn_msg *m;
size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
nlh = (struct nlmsghdr *)buf;
nlh->nlmsg_seq = seq++;
nlh->nlmsg_pid = getpid();
nlh->nlmsg_type = NLMSG_DONE;
nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
nlh->nlmsg_flags = 0;
m = NLMSG_DATA(nlh);
#if 0
ulog("%s: [%08x.%08x] len=%u, seq=%u, ack=%u.\n",
__func__, msg->id.idx, msg->id.val, msg->len, msg->seq, msg->ack);
#endif
memcpy(m, msg, sizeof(*m) + msg->len);
err = send(s, nlh, size, 0);
if (err == -1)
ulog("Failed to send: %s [%d].\n",
strerror(errno), errno);
return err;
}
int main(int argc, char *argv[])
{
int s;
char buf[1024];
int len;
struct nlmsghdr *reply;
struct sockaddr_nl l_local;
struct cn_msg *data;
FILE *out;
time_t tm;
struct pollfd pfd;
if (argc < 2)
out = stdout;
else {
out = fopen(argv[1], "a+");
if (!out) {
ulog("Unable to open %s for writing: %s\n",
argv[1], strerror(errno));
out = stdout;
}
}
memset(buf, 0, sizeof(buf));
s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (s == -1) {
perror("socket");
return -1;
}
l_local.nl_family = AF_NETLINK;
l_local.nl_groups = 0x123; /* bitmask of requested groups */
l_local.nl_pid = 0;
if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) {
perror("bind");
close(s);
return -1;
}
#if 0
{
int on = 0x57; /* Additional group number */
setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &on, sizeof(on));
}
#endif
if (0) {
int i, j;
memset(buf, 0, sizeof(buf));
data = (struct cn_msg *)buf;
data->id.idx = 0x123;
data->id.val = 0x456;
data->seq = seq++;
data->ack = 0;
data->len = 0;
for (j=0; j<10; ++j) {
for (i=0; i<1000; ++i) {
len = netlink_send(s, data);
}
ulog("%d messages have been sent to %08x.%08x.\n", i, data->id.idx, data->id.val);
}
return 0;
}
pfd.fd = s;
while (!need_exit) {
pfd.events = POLLIN;
pfd.revents = 0;
switch (poll(&pfd, 1, -1)) {
case 0:
need_exit = 1;
break;
case -1:
if (errno != EINTR) {
need_exit = 1;
break;
}
continue;
}
if (need_exit)
break;
memset(buf, 0, sizeof(buf));
len = recv(s, buf, sizeof(buf), 0);
if (len == -1) {
perror("recv buf");
close(s);
return -1;
}
reply = (struct nlmsghdr *)buf;
switch (reply->nlmsg_type) {
case NLMSG_ERROR:
fprintf(out, "Error message received.\n");
fflush(out);
break;
case NLMSG_DONE:
data = (struct cn_msg *)NLMSG_DATA(reply);
time(&tm);
fprintf(out, "%.24s : [%x.%x] [%08u.%08u].\n",
ctime(&tm), data->id.idx, data->id.val, data->seq, data->ack);
fflush(out);
break;
default:
break;
}
}
close(s);
return 0;
}

View file

@@ -217,6 +217,12 @@ exclusive cpuset. Also, the use of a Linux virtual file system (vfs)
to represent the cpuset hierarchy provides for a familiar permission
and name space for cpusets, with a minimum of additional kernel code.
The cpus file in the root (top_cpuset) cpuset is read-only.
It automatically tracks the value of cpu_online_map, using a CPU
hotplug notifier. If and when memory nodes can be hotplugged,
we expect to make the mems file in the root cpuset read-only
as well, and have it track the value of node_online_map.
1.4 What are exclusive cpusets ?
--------------------------------

View file

@@ -62,8 +62,8 @@ ramfs-rootfs-initramfs.txt
- info on the 'in memory' filesystems ramfs, rootfs and initramfs.
reiser4.txt
- info on the Reiser4 filesystem based on dancing tree algorithms.
relayfs.txt
- info on relayfs, for efficient streaming from kernel to user space.
relay.txt
- info on relay, for efficient streaming from kernel to user space.
romfs.txt
- description of the ROMFS filesystem.
smbfs.txt

View file

@@ -0,0 +1,479 @@
relay interface (formerly relayfs)
==================================
The relay interface provides a means for kernel applications to
efficiently log and transfer large quantities of data from the kernel
to userspace via user-defined 'relay channels'.
A 'relay channel' is a kernel->user data relay mechanism implemented
as a set of per-cpu kernel buffers ('channel buffers'), each
represented as a regular file ('relay file') in user space. Kernel
clients write into the channel buffers using efficient write
functions; these automatically log into the current cpu's channel
buffer. User space applications mmap() or read() from the relay files
and retrieve the data as it becomes available. The relay files
themselves are files created in a host filesystem, e.g. debugfs, and
are associated with the channel buffers using the API described below.
The format of the data logged into the channel buffers is completely
up to the kernel client; the relay interface does however provide
hooks which allow kernel clients to impose some structure on the
buffer data. The relay interface doesn't implement any form of data
filtering - this also is left to the kernel client. The purpose is to
keep things as simple as possible.
This document provides an overview of the relay interface API. The
details of the function parameters are documented along with the
functions in the relay interface code - please see that for details.
Semantics
=========
Each relay channel has one buffer per CPU, each buffer has one or more
sub-buffers. Messages are written to the first sub-buffer until it is
too full to contain a new message, in which case it is written to
the next (if available). Messages are never split across sub-buffers.
At this point, userspace can be notified so it empties the first
sub-buffer, while the kernel continues writing to the next.
When notified that a sub-buffer is full, the kernel knows how many
bytes of it are padding i.e. unused space occurring because a complete
message couldn't fit into a sub-buffer. Userspace can use this
knowledge to copy only valid data.
After copying it, userspace can notify the kernel that a sub-buffer
has been consumed.
A relay channel can operate in a mode where it will overwrite data not
yet collected by userspace, and not wait for it to be consumed.
The relay channel itself does not provide for communication of such
data between userspace and kernel, allowing the kernel side to remain
simple and not impose a single interface on userspace. It does
provide a set of examples and a separate helper though, described
below.
The read() interface both removes padding and internally consumes the
read sub-buffers; thus in cases where read(2) is being used to drain
the channel buffers, special-purpose communication between kernel and
user isn't necessary for basic operation.
One of the major goals of the relay interface is to provide a low
overhead mechanism for conveying kernel data to userspace. While the
read() interface is easy to use, it's not as efficient as the mmap()
approach; the example code attempts to make the tradeoff between the
two approaches as small as possible.
klog and relay-apps example code
================================
The relay interface itself is ready to use, but to make things easier,
a couple simple utility functions and a set of examples are provided.
The relay-apps example tarball, available on the relay sourceforge
site, contains a set of self-contained examples, each consisting of a
pair of .c files containing boilerplate code for each of the user and
kernel sides of a relay application. When combined these two sets of
boilerplate code provide glue to easily stream data to disk, without
having to bother with mundane housekeeping chores.
The 'klog debugging functions' patch (klog.patch in the relay-apps
tarball) provides a couple of high-level logging functions to the
kernel which allow writing formatted text or raw data to a channel,
regardless of whether a channel to write into exists or not, or even
whether the relay interface is compiled into the kernel or not. These
functions allow you to put unconditional 'trace' statements anywhere
in the kernel or kernel modules; only when there is a 'klog handler'
registered will data actually be logged (see the klog and kleak
examples for details).
It is of course possible to use the relay interface from scratch,
i.e. without using any of the relay-apps example code or klog, but
you'll have to implement communication between userspace and kernel,
allowing both to convey the state of buffers (full, empty, amount of
padding). The read() interface both removes padding and internally
consumes the read sub-buffers; thus in cases where read(2) is being
used to drain the channel buffers, special-purpose communication
between kernel and user isn't necessary for basic operation. Things
such as buffer-full conditions would still need to be communicated via
some channel though.
klog and the relay-apps examples can be found in the relay-apps
tarball on http://relayfs.sourceforge.net
The relay interface user space API
==================================
The relay interface implements basic file operations for user space
access to relay channel buffer data. Here are the file operations
that are available and some comments regarding their behavior:
open() enables the user to open an _existing_ channel buffer.
mmap() results in the channel buffer being mapped into the caller's
memory space. Note that you can't do a partial mmap - you
must map the entire file, which is NRBUF * SUBBUFSIZE.
read() reads the contents of a channel buffer. The bytes read are
'consumed' by the reader, i.e. they won't be available
again to subsequent reads. If the channel is being used
in no-overwrite mode (the default), it can be read at any
time even if there's an active kernel writer. If the
channel is being used in overwrite mode and there are
active channel writers, results may be unpredictable -
users should make sure that all logging to the channel has
ended before using read() with overwrite mode. Sub-buffer
padding is automatically removed and will not be seen by
the reader.
sendfile() transfers data from a channel buffer to an output file
descriptor. Sub-buffer padding is automatically removed
and will not be seen by the reader.
poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
notified when sub-buffer boundaries are crossed.
close() decrements the channel buffer's refcount. When the refcount
reaches 0, i.e. when no process or kernel client has the
buffer open, the channel buffer is freed.
In order for a user application to make use of relay files, the
host filesystem must be mounted. For example,
mount -t debugfs debugfs /debug
NOTE: the host filesystem doesn't need to be mounted for kernel
clients to create or use channels - it only needs to be
mounted when user space applications need access to the buffer
data.
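For reference, here is a minimal userspace sketch of draining a channel
buffer with poll() and read(); it assumes the relay files were created
as 'cpu0'...'cpuN-1' in a debugfs mounted at /debug as in the example
above, and it omits error handling:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/debug/cpu0", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	/* poll() wakes us at sub-buffer boundaries; read() strips the
	 * padding and consumes whatever bytes it returns. */
	while (poll(&pfd, 1, -1) > 0)
		while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);

	close(pfd.fd);
	return 0;
}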
The relay interface kernel API
==============================
Here's a summary of the API the relay interface provides to in-kernel clients:
TBD(curr. line MT:/API/)
channel management functions:
relay_open(base_filename, parent, subbuf_size, n_subbufs,
callbacks)
relay_close(chan)
relay_flush(chan)
relay_reset(chan)
channel management typically called on instigation of userspace:
relay_subbufs_consumed(chan, cpu, subbufs_consumed)
write functions:
relay_write(chan, data, length)
__relay_write(chan, data, length)
relay_reserve(chan, length)
callbacks:
subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
buf_mapped(buf, filp)
buf_unmapped(buf, filp)
create_buf_file(filename, parent, mode, buf, is_global)
remove_buf_file(dentry)
helper functions:
relay_buf_full(buf)
subbuf_start_reserve(buf, length)
Creating a channel
------------------
relay_open() is used to create a channel, along with its per-cpu
channel buffers. Each channel buffer will have an associated file
created for it in the host filesystem, which can be mmapped or
read from in user space. The files are named basename0...basenameN-1
where N is the number of online cpus, and by default will be created
in the root of the filesystem (if the parent param is NULL). If you
want a directory structure to contain your relay files, you should
create it using the host filesystem's directory creation function,
e.g. debugfs_create_dir(), and pass the parent directory to
relay_open(). Users are responsible for cleaning up any directory
structure they create, when the channel is closed - again the host
filesystem's directory removal functions should be used for that,
e.g. debugfs_remove().
In order for a channel to be created and the host filesystem's files
associated with its channel buffers, the user must provide definitions
for two callback functions, create_buf_file() and remove_buf_file().
create_buf_file() is called once for each per-cpu buffer from
relay_open() and allows the user to create the file which will be used
to represent the corresponding channel buffer. The callback should
return the dentry of the file created to represent the channel buffer.
remove_buf_file() must also be defined; it's responsible for deleting
the file(s) created in create_buf_file() and is called during
relay_close().
Here are some typical definitions for these callbacks, in this case
using debugfs:
/*
* create_buf_file() callback. Creates relay file in debugfs.
*/
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent,
int mode,
struct rchan_buf *buf,
int *is_global)
{
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
/*
* remove_buf_file() callback. Removes relay file from debugfs.
*/
static int remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
/*
* relay interface callbacks
*/
static struct rchan_callbacks relay_callbacks =
{
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
And an example relay_open() invocation using them:
chan = relay_open("cpu", NULL, SUBBUF_SIZE, N_SUBBUFS, &relay_callbacks);
If the create_buf_file() callback fails, or isn't defined, channel
creation and thus relay_open() will fail.
The total size of each per-cpu buffer is calculated by multiplying the
number of sub-buffers by the sub-buffer size passed into relay_open().
The idea behind sub-buffers is that they're basically an extension of
double-buffering to N buffers, and they also allow applications to
easily implement random-access-on-buffer-boundary schemes, which can
be important for some high-volume applications. The number and size
of sub-buffers is completely dependent on the application and even for
the same application, different conditions will warrant different
values for these parameters at different times. Typically, the right
values to use are best decided after some experimentation; in general,
though, it's safe to assume that having only 1 sub-buffer is a bad
idea - you're guaranteed to either overwrite data or lose events
depending on the channel mode being used.
The create_buf_file() implementation can also be defined in such a way
as to allow the creation of a single 'global' buffer instead of the
default per-cpu set. This can be useful for applications interested
mainly in seeing the relative ordering of system-wide events without
the need to bother with saving explicit timestamps for the purpose of
merging/sorting per-cpu files in a postprocessing step.
To have relay_open() create a global buffer, the create_buf_file()
implementation should set the value of the is_global outparam to a
non-zero value in addition to creating the file that will be used to
represent the single buffer. In the case of a global buffer,
create_buf_file() and remove_buf_file() will be called only once. The
normal channel-writing functions, e.g. relay_write(), can still be
used - writes from any cpu will transparently end up in the global
buffer - but since it is a global buffer, callers should make sure
they use the proper locking for such a buffer, either by wrapping
writes in a spinlock, or by copying a write function from relay.h and
creating a local version that internally does the proper locking.
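As a rough illustration of the first option, a client-side wrapper along
the following lines could serialize writes to a global buffer (the lock
and function names here are hypothetical, not part of the relay API):

static DEFINE_SPINLOCK(global_buf_lock);

static void global_buf_write(struct rchan *chan, const void *data,
			     size_t length)
{
	unsigned long flags;

	/* Serialize writers from all cpus into the single buffer. */
	spin_lock_irqsave(&global_buf_lock, flags);
	relay_write(chan, data, length);
	spin_unlock_irqrestore(&global_buf_lock, flags);
}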
Channel 'modes'
---------------
relay channels can be used in either of two modes - 'overwrite' or
'no-overwrite'. The mode is entirely determined by the implementation
of the subbuf_start() callback, as described below. The default if no
subbuf_start() callback is defined is 'no-overwrite' mode. If the
default mode suits your needs, and you plan to use the read()
interface to retrieve channel data, you can ignore the details of this
section, as it pertains mainly to mmap() implementations.
In 'overwrite' mode, also known as 'flight recorder' mode, writes
continuously cycle around the buffer and will never fail, but will
unconditionally overwrite old data regardless of whether it's actually
been consumed. In no-overwrite mode, writes will fail, i.e. data will
be lost, if the number of unconsumed sub-buffers equals the total
number of sub-buffers in the channel. It should be clear that if
there is no consumer or if the consumer can't consume sub-buffers fast
enough, data will be lost in either case; the only difference is
whether data is lost from the beginning or the end of a buffer.
As explained above, a relay channel is made up of one or more
per-cpu channel buffers, each implemented as a circular buffer
subdivided into one or more sub-buffers. Messages are written into
the current sub-buffer of the channel's current per-cpu buffer via the
write functions described below. Whenever a message can't fit into
the current sub-buffer, because there's no room left for it, the
client is notified via the subbuf_start() callback that a switch to a
new sub-buffer is about to occur. The client uses this callback to 1)
initialize the next sub-buffer if appropriate 2) finalize the previous
sub-buffer if appropriate and 3) return a boolean value indicating
whether or not to actually move on to the next sub-buffer.
To implement 'no-overwrite' mode, the userspace client would provide
an implementation of the subbuf_start() callback something like the
following:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
if (relay_buf_full(buf))
return 0;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
If the current buffer is full, i.e. all sub-buffers remain unconsumed,
the callback returns 0 to indicate that the buffer switch should not
occur yet, i.e. until the consumer has had a chance to read the
current set of ready sub-buffers. For the relay_buf_full() function
to make sense, the consumer is responsible for notifying the relay
interface when sub-buffers have been consumed via
relay_subbufs_consumed(). Any subsequent attempts to write into the
buffer will again invoke the subbuf_start() callback with the same
parameters; only when the consumer has consumed one or more of the
ready sub-buffers will relay_buf_full() return 0, in which case the
buffer switch can continue.
The implementation of the subbuf_start() callback for 'overwrite' mode
would be very similar:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
In this case, the relay_buf_full() check is meaningless and the
callback always returns 1, causing the buffer switch to occur
unconditionally. It's also meaningless for the client to use the
relay_subbufs_consumed() function in this mode, as it's never
consulted.
The default subbuf_start() implementation, used if the client doesn't
define any callbacks, or doesn't define the subbuf_start() callback,
implements the simplest possible 'no-overwrite' mode, i.e. it does
nothing but return 0.
Header information can be reserved at the beginning of each sub-buffer
by calling the subbuf_start_reserve() helper function from within the
subbuf_start() callback. This reserved area can be used to store
whatever information the client wants. In the example above, room is
reserved in each sub-buffer to store the padding count for that
sub-buffer. This is filled in for the previous sub-buffer in the
subbuf_start() implementation; the padding value for the previous
sub-buffer is passed into the subbuf_start() callback along with a
pointer to the previous sub-buffer, since the padding value isn't
known until a sub-buffer is filled. The subbuf_start() callback is
also called for the first sub-buffer when the channel is opened, to
give the client a chance to reserve space in it. In this case the
previous sub-buffer pointer passed into the callback will be NULL, so
the client should check the value of the prev_subbuf pointer before
writing into the previous sub-buffer.
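To illustrate how a mmap()-based reader might use that header, here is
a sketch that assumes the subbuf_start() example above (an unsigned int
padding count stored at the start of each sub-buffer) and that
SUBBUF_SIZE and N_SUBBUFS match the values passed to relay_open(); the
consumed-sub-buffer bookkeeping and the handle_events() consumer are
left to the application:

/* base points at the mmap()ed channel buffer (N_SUBBUFS * SUBBUF_SIZE). */
static void process_subbuf(const char *base, unsigned int i)
{
	const char *subbuf = base + i * SUBBUF_SIZE;
	unsigned int padding = *(const unsigned int *)subbuf;
	const char *data = subbuf + sizeof(unsigned int);
	size_t valid = SUBBUF_SIZE - sizeof(unsigned int) - padding;

	handle_events(data, valid);
}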
Writing to a channel
--------------------
Kernel clients write data into the current cpu's channel buffer using
relay_write() or __relay_write(). relay_write() is the main logging
function - it uses local_irqsave() to protect the buffer and should be
used if you might be logging from interrupt context. If you know
you'll never be logging from interrupt context, you can use
__relay_write(), which only disables preemption. These functions
don't return a value, so you can't determine whether or not they
failed - the assumption is that you wouldn't want to check a return
value in the fast logging path anyway, and that they'll always succeed
unless the buffer is full and no-overwrite mode is being used, in
which case you can detect a failed write in the subbuf_start()
callback by calling the relay_buf_full() helper function.
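A typical logging call might look something like the following sketch;
the event structure, helper names and timestamp source are made up for
illustration and are not part of the relay API:

struct app_event {
	u32 type;
	u32 data;
	u64 timestamp;
};

static void app_log_event(struct rchan *chan, u32 type, u32 data)
{
	struct app_event ev = {
		.type      = type,
		.data      = data,
		.timestamp = sched_clock(),
	};

	/* Safe from interrupt context; relay_write() protects the
	 * current cpu's buffer itself. */
	relay_write(chan, &ev, sizeof(ev));
}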
relay_reserve() is used to reserve a slot in a channel buffer which
can be written to later. This would typically be used in applications
that need to write directly into a channel buffer without having to
stage data in a temporary buffer beforehand. Because the actual write
may not happen immediately after the slot is reserved, applications
using relay_reserve() can keep a count of the number of bytes actually
written, either in space reserved in the sub-buffers themselves or as
a separate array. See the 'reserve' example in the relay-apps tarball
at http://relayfs.sourceforge.net for an example of how this can be
done. Because the write is under control of the client and is
separated from the reserve, relay_reserve() doesn't protect the buffer
at all - it's up to the client to provide the appropriate
synchronization when using relay_reserve().
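For example, a client that copies data straight into the buffer might
do something like the following sketch, with the caller assumed to
provide its own locking as noted above:

static void app_log_packet(struct rchan *chan, const void *pkt,
			   size_t length)
{
	void *slot = relay_reserve(chan, length);

	/* relay_reserve() returns NULL if no space could be reserved,
	 * e.g. when the buffer is full in no-overwrite mode. */
	if (slot)
		memcpy(slot, pkt, length);
}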
Closing a channel
-----------------
The client calls relay_close() when it's finished using the channel.
The channel and its associated buffers are destroyed when there are no
longer any references to any of the channel buffers. relay_flush()
forces a sub-buffer switch on all the channel buffers, and can be used
to finalize and process the last sub-buffers before the channel is
closed.
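A typical teardown sequence, e.g. from a module's exit path, might look
like the following sketch, where chan and dir are whatever the client
saved from relay_open() and debugfs_create_dir():

static void app_channel_destroy(struct rchan *chan, struct dentry *dir)
{
	relay_flush(chan);	/* push out partially-filled sub-buffers */
	relay_close(chan);	/* drop the kernel's buffer references */
	debugfs_remove(dir);	/* remove the directory created earlier */
}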
Misc
----
Some applications may want to keep a channel around and re-use it
rather than open and close a new channel for each use. relay_reset()
can be used for this purpose - it resets a channel to its initial
state without reallocating channel buffer memory or destroying
existing mappings. It should however only be called when it's safe to
do so, i.e. when the channel isn't currently being written to.
Finally, there are a couple of utility callbacks that can be used for
different purposes. buf_mapped() is called whenever a channel buffer
is mmapped from user space and buf_unmapped() is called when it's
unmapped. The client can use this notification to trigger actions
within the kernel application, such as enabling/disabling logging to
the channel.
Resources
=========
For news, example code, mailing list, etc. see the relay interface homepage:
http://relayfs.sourceforge.net
Credits
=======
The ideas and specs for the relay interface came about as a result of
discussions on tracing involving the following:
Michel Dagenais <michel.dagenais@polymtl.ca>
Richard Moore <richardj_moore@uk.ibm.com>
Bob Wisniewski <bob@watson.ibm.com>
Karim Yaghmour <karim@opersys.com>
Tom Zanussi <zanussi@us.ibm.com>
Also thanks to Hubertus Franke for a lot of useful suggestions and bug
reports.

View file

@@ -1,442 +0,0 @@
relayfs - a high-speed data relay filesystem
============================================
relayfs is a filesystem designed to provide an efficient mechanism for
tools and facilities to relay large and potentially sustained streams
of data from kernel space to user space.
The main abstraction of relayfs is the 'channel'. A channel consists
of a set of per-cpu kernel buffers each represented by a file in the
relayfs filesystem. Kernel clients write into a channel using
efficient write functions which automatically log to the current cpu's
channel buffer. User space applications mmap() the per-cpu files and
retrieve the data as it becomes available.
The format of the data logged into the channel buffers is completely
up to the relayfs client; relayfs does however provide hooks which
allow clients to impose some structure on the buffer data. Nor does
relayfs implement any form of data filtering - this also is left to
the client. The purpose is to keep relayfs as simple as possible.
This document provides an overview of the relayfs API. The details of
the function parameters are documented along with the functions in the
filesystem code - please see that for details.
Semantics
=========
Each relayfs channel has one buffer per CPU, each buffer has one or
more sub-buffers. Messages are written to the first sub-buffer until
it is too full to contain a new message, in which case it is
written to the next (if available). Messages are never split across
sub-buffers. At this point, userspace can be notified so it empties
the first sub-buffer, while the kernel continues writing to the next.
When notified that a sub-buffer is full, the kernel knows how many
bytes of it are padding i.e. unused. Userspace can use this knowledge
to copy only valid data.
After copying it, userspace can notify the kernel that a sub-buffer
has been consumed.
relayfs can operate in a mode where it will overwrite data not yet
collected by userspace, and not wait for it to be consumed.
relayfs itself does not provide for communication of such data between
userspace and kernel, allowing the kernel side to remain simple and
not impose a single interface on userspace. It does provide a set of
examples and a separate helper though, described below.
klog and relay-apps example code
================================
relayfs itself is ready to use, but to make things easier, a couple
simple utility functions and a set of examples are provided.
The relay-apps example tarball, available on the relayfs sourceforge
site, contains a set of self-contained examples, each consisting of a
pair of .c files containing boilerplate code for each of the user and
kernel sides of a relayfs application; combined these two sets of
boilerplate code provide glue to easily stream data to disk, without
having to bother with mundane housekeeping chores.
The 'klog debugging functions' patch (klog.patch in the relay-apps
tarball) provides a couple of high-level logging functions to the
kernel which allow writing formatted text or raw data to a channel,
regardless of whether a channel to write into exists or not, or
whether relayfs is compiled into the kernel or is configured as a
module. These functions allow you to put unconditional 'trace'
statements anywhere in the kernel or kernel modules; only when there
is a 'klog handler' registered will data actually be logged (see the
klog and kleak examples for details).
It is of course possible to use relayfs from scratch i.e. without
using any of the relay-apps example code or klog, but you'll have to
implement communication between userspace and kernel, allowing both to
convey the state of buffers (full, empty, amount of padding).
klog and the relay-apps examples can be found in the relay-apps
tarball on http://relayfs.sourceforge.net
The relayfs user space API
==========================
relayfs implements basic file operations for user space access to
relayfs channel buffer data. Here are the file operations that are
available and some comments regarding their behavior:
open() enables user to open an _existing_ buffer.
mmap() results in channel buffer being mapped into the caller's
memory space. Note that you can't do a partial mmap - you must
map the entire file, which is NRBUF * SUBBUFSIZE.
read() read the contents of a channel buffer. The bytes read are
'consumed' by the reader i.e. they won't be available again
to subsequent reads. If the channel is being used in
no-overwrite mode (the default), it can be read at any time
even if there's an active kernel writer. If the channel is
being used in overwrite mode and there are active channel
writers, results may be unpredictable - users should make
sure that all logging to the channel has ended before using
read() with overwrite mode.
poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
notified when sub-buffer boundaries are crossed.
close() decrements the channel buffer's refcount. When the refcount
reaches 0 i.e. when no process or kernel client has the buffer
open, the channel buffer is freed.
In order for a user application to make use of relayfs files, the
relayfs filesystem must be mounted. For example,
mount -t relayfs relayfs /mnt/relay
NOTE: relayfs doesn't need to be mounted for kernel clients to create
or use channels - it only needs to be mounted when user space
applications need access to the buffer data.
The relayfs kernel API
======================
Here's a summary of the API relayfs provides to in-kernel clients:
channel management functions:
relay_open(base_filename, parent, subbuf_size, n_subbufs,
callbacks)
relay_close(chan)
relay_flush(chan)
relay_reset(chan)
relayfs_create_dir(name, parent)
relayfs_remove_dir(dentry)
relayfs_create_file(name, parent, mode, fops, data)
relayfs_remove_file(dentry)
channel management typically called on instigation of userspace:
relay_subbufs_consumed(chan, cpu, subbufs_consumed)
write functions:
relay_write(chan, data, length)
__relay_write(chan, data, length)
relay_reserve(chan, length)
callbacks:
subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
buf_mapped(buf, filp)
buf_unmapped(buf, filp)
create_buf_file(filename, parent, mode, buf, is_global)
remove_buf_file(dentry)
helper functions:
relay_buf_full(buf)
subbuf_start_reserve(buf, length)
Creating a channel
------------------
relay_open() is used to create a channel, along with its per-cpu
channel buffers. Each channel buffer will have an associated file
created for it in the relayfs filesystem, which can be opened and
mmapped from user space if desired. The files are named
basename0...basenameN-1 where N is the number of online cpus, and by
default will be created in the root of the filesystem. If you want a
directory structure to contain your relayfs files, you can create it
with relayfs_create_dir() and pass the parent directory to
relay_open(). Clients are responsible for cleaning up any directory
structure they create when the channel is closed - use
relayfs_remove_dir() for that.
The total size of each per-cpu buffer is calculated by multiplying the
number of sub-buffers by the sub-buffer size passed into relay_open().
The idea behind sub-buffers is that they're basically an extension of
double-buffering to N buffers, and they also allow applications to
easily implement random-access-on-buffer-boundary schemes, which can
be important for some high-volume applications. The number and size
of sub-buffers is completely dependent on the application and even for
the same application, different conditions will warrant different
values for these parameters at different times. Typically, the right
values to use are best decided after some experimentation; in general,
though, it's safe to assume that having only 1 sub-buffer is a bad
idea - you're guaranteed to either overwrite data or lose events
depending on the channel mode being used.
Channel 'modes'
---------------
relayfs channels can be used in either of two modes - 'overwrite' or
'no-overwrite'. The mode is entirely determined by the implementation
of the subbuf_start() callback, as described below. In 'overwrite'
mode, also known as 'flight recorder' mode, writes continuously cycle
around the buffer and will never fail, but will unconditionally
overwrite old data regardless of whether it's actually been consumed.
In no-overwrite mode, writes will fail i.e. data will be lost, if the
number of unconsumed sub-buffers equals the total number of
sub-buffers in the channel. It should be clear that if there is no
consumer or if the consumer can't consume sub-buffers fast enough,
data will be lost in either case; the only difference is whether data
is lost from the beginning or the end of a buffer.
As explained above, a relayfs channel is made up of one or more
per-cpu channel buffers, each implemented as a circular buffer
subdivided into one or more sub-buffers. Messages are written into
the current sub-buffer of the channel's current per-cpu buffer via the
write functions described below. Whenever a message can't fit into
the current sub-buffer, because there's no room left for it, the
client is notified via the subbuf_start() callback that a switch to a
new sub-buffer is about to occur. The client uses this callback to 1)
initialize the next sub-buffer if appropriate 2) finalize the previous
sub-buffer if appropriate and 3) return a boolean value indicating
whether or not to actually go ahead with the sub-buffer switch.
To implement 'no-overwrite' mode, the userspace client would provide
an implementation of the subbuf_start() callback something like the
following:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
if (relay_buf_full(buf))
return 0;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
If the current buffer is full i.e. all sub-buffers remain unconsumed,
the callback returns 0 to indicate that the buffer switch should not
occur yet i.e. until the consumer has had a chance to read the current
set of ready sub-buffers. For the relay_buf_full() function to make
sense, the consumer is responsible for notifying relayfs when
sub-buffers have been consumed via relay_subbufs_consumed(). Any
subsequent attempts to write into the buffer will again invoke the
subbuf_start() callback with the same parameters; only when the
consumer has consumed one or more of the ready sub-buffers will
relay_buf_full() return 0, in which case the buffer switch can
continue.
The implementation of the subbuf_start() callback for 'overwrite' mode
would be very similar:
static int subbuf_start(struct rchan_buf *buf,
void *subbuf,
void *prev_subbuf,
unsigned int prev_padding)
{
if (prev_subbuf)
*((unsigned *)prev_subbuf) = prev_padding;
subbuf_start_reserve(buf, sizeof(unsigned int));
return 1;
}
In this case, the relay_buf_full() check is meaningless and the
callback always returns 1, causing the buffer switch to occur
unconditionally. It's also meaningless for the client to use the
relay_subbufs_consumed() function in this mode, as it's never
consulted.
The default subbuf_start() implementation, used if the client doesn't
define any callbacks, or doesn't define the subbuf_start() callback,
implements the simplest possible 'no-overwrite' mode i.e. it does
nothing but return 0.
Header information can be reserved at the beginning of each sub-buffer
by calling the subbuf_start_reserve() helper function from within the
subbuf_start() callback. This reserved area can be used to store
whatever information the client wants. In the example above, room is
reserved in each sub-buffer to store the padding count for that
sub-buffer. This is filled in for the previous sub-buffer in the
subbuf_start() implementation; the padding value for the previous
sub-buffer is passed into the subbuf_start() callback along with a
pointer to the previous sub-buffer, since the padding value isn't
known until a sub-buffer is filled. The subbuf_start() callback is
also called for the first sub-buffer when the channel is opened, to
give the client a chance to reserve space in it. In this case the
previous sub-buffer pointer passed into the callback will be NULL, so
the client should check the value of the prev_subbuf pointer before
writing into the previous sub-buffer.
Writing to a channel
--------------------
kernel clients write data into the current cpu's channel buffer using
relay_write() or __relay_write(). relay_write() is the main logging
function - it uses local_irqsave() to protect the buffer and should be
used if you might be logging from interrupt context. If you know
you'll never be logging from interrupt context, you can use
__relay_write(), which only disables preemption. These functions
don't return a value, so you can't determine whether or not they
failed - the assumption is that you wouldn't want to check a return
value in the fast logging path anyway, and that they'll always succeed
unless the buffer is full and no-overwrite mode is being used, in
which case you can detect a failed write in the subbuf_start()
callback by calling the relay_buf_full() helper function.
relay_reserve() is used to reserve a slot in a channel buffer which
can be written to later. This would typically be used in applications
that need to write directly into a channel buffer without having to
stage data in a temporary buffer beforehand. Because the actual write
may not happen immediately after the slot is reserved, applications
using relay_reserve() can keep a count of the number of bytes actually
written, either in space reserved in the sub-buffers themselves or as
a separate array. See the 'reserve' example in the relay-apps tarball
at http://relayfs.sourceforge.net for an example of how this can be
done. Because the write is under control of the client and is
separated from the reserve, relay_reserve() doesn't protect the buffer
at all - it's up to the client to provide the appropriate
synchronization when using relay_reserve().
Closing a channel
-----------------
The client calls relay_close() when it's finished using the channel.
The channel and its associated buffers are destroyed when there are no
longer any references to any of the channel buffers. relay_flush()
forces a sub-buffer switch on all the channel buffers, and can be used
to finalize and process the last sub-buffers before the channel is
closed.
Creating non-relay files
------------------------
relay_open() automatically creates files in the relayfs filesystem to
represent the per-cpu kernel buffers; it's often useful for
applications to be able to create their own files alongside the relay
files in the relayfs filesystem as well e.g. 'control' files much like
those created in /proc or debugfs for similar purposes, used to
communicate control information between the kernel and user sides of a
relayfs application. For this purpose the relayfs_create_file() and
relayfs_remove_file() API functions exist. For relayfs_create_file(),
the caller passes in a set of user-defined file operations to be used
for the file and an optional void * to a user-specified data item,
which will be accessible via inode->u.generic_ip (see the relay-apps
tarball for examples). The file_operations are a required parameter
to relayfs_create_file() and thus the semantics of these files are
completely defined by the caller.
See the relay-apps tarball at http://relayfs.sourceforge.net for
examples of how these non-relay files are meant to be used.
Creating relay files in other filesystems
-----------------------------------------
By default of course, relay_open() creates relay files in the relayfs
filesystem. Because relay_file_operations is exported, however, it's
also possible to create and use relay files in other pseudo-filesystems
such as debugfs.
For this purpose, two callback functions are provided,
create_buf_file() and remove_buf_file(). create_buf_file() is called
once for each per-cpu buffer from relay_open() to allow the client to
create a file to be used to represent the corresponding buffer; if
this callback is not defined, the default implementation will create
and return a file in the relayfs filesystem to represent the buffer.
The callback should return the dentry of the file created to represent
the relay buffer. Note that the parent directory passed to
relay_open() (and passed along to the callback), if specified, must
exist in the same filesystem the new relay file is created in. If
create_buf_file() is defined, remove_buf_file() must also be defined;
it's responsible for deleting the file(s) created in create_buf_file()
and is called during relay_close().
The create_buf_file() implementation can also be defined in such a way
as to allow the creation of a single 'global' buffer instead of the
default per-cpu set. This can be useful for applications interested
mainly in seeing the relative ordering of system-wide events without
the need to bother with saving explicit timestamps for the purpose of
merging/sorting per-cpu files in a postprocessing step.
To have relay_open() create a global buffer, the create_buf_file()
implementation should set the value of the is_global outparam to a
non-zero value in addition to creating the file that will be used to
represent the single buffer. In the case of a global buffer,
create_buf_file() and remove_buf_file() will be called only once. The
normal channel-writing functions e.g. relay_write() can still be used
- writes from any cpu will transparently end up in the global buffer -
but since it is a global buffer, callers should make sure they use the
proper locking for such a buffer, either by wrapping writes in a
spinlock, or by copying a write function from relayfs_fs.h and
creating a local version that internally does the proper locking.
See the 'exported-relayfile' examples in the relay-apps tarball for
examples of creating and using relay files in debugfs.
Misc
----
Some applications may want to keep a channel around and re-use it
rather than open and close a new channel for each use. relay_reset()
can be used for this purpose - it resets a channel to its initial
state without reallocating channel buffer memory or destroying
existing mappings. It should however only be called when it's safe to
do so i.e. when the channel isn't currently being written to.
Finally, there are a couple of utility callbacks that can be used for
different purposes. buf_mapped() is called whenever a channel buffer
is mmapped from user space and buf_unmapped() is called when it's
unmapped. The client can use this notification to trigger actions
within the kernel application, such as enabling/disabling logging to
the channel.
Resources
=========
For news, example code, mailing list, etc. see the relayfs homepage:
http://relayfs.sourceforge.net
Credits
=======
The ideas and specs for relayfs came about as a result of discussions
on tracing involving the following:
Michel Dagenais <michel.dagenais@polymtl.ca>
Richard Moore <richardj_moore@uk.ibm.com>
Bob Wisniewski <bob@watson.ibm.com>
Karim Yaghmour <karim@opersys.com>
Tom Zanussi <zanussi@us.ibm.com>
Also thanks to Hubertus Franke for a lot of useful suggestions and bug
reports.

View file

@@ -39,7 +39,6 @@ them. Bug reports and success stories are also welcome.
The input project website is at:
http://www.suse.cz/development/input/
http://atrey.karlin.mff.cuni.cz/~vojtech/input/
There is also a mailing list for the driver at:

View file

@@ -1,3 +1,126 @@
Release Date : Fri May 19 09:31:45 EST 2006 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.9 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
1. Fixed a bug in megaraid_init_mbox().
Customer reported "garbage in file on x86_64 platform".
Root Cause: the driver registered controllers as 64-bit DMA capable
even when they do not support it.
Fix: Changed the function to insert an identification mechanism that
identifies 64-bit DMA capable controllers.
> -----Original Message-----
> From: Vasily Averin [mailto:vvs@sw.ru]
> Sent: Thursday, May 04, 2006 2:49 PM
> To: linux-scsi@vger.kernel.org; Kolli, Neela; Mukker, Atul;
> Ju, Seokmann; Bagalkote, Sreenivas;
> James.Bottomley@SteelEye.com; devel@openvz.org
> Subject: megaraid_mbox: garbage in file
>
> Hello all,
>
> I've investigated customers claim on the unstable work of
> their node and found a
> strange effect: reading from some files leads to the
> "attempt to access beyond end of device" messages.
>
> I've checked filesystem, memory on the node, motherboard BIOS
> version, but it
> does not help and issue still has been reproduced by simple
> file reading.
>
> Reproducer is simple:
>
> echo 0xffffffff >/proc/sys/dev/scsi/logging_level ;
> cat /vz/private/101/root/etc/ld.so.cache >/tmp/ttt ;
> echo 0 >/proc/sys/dev/scsi/logging
>
> It leads to the following messages in dmesg
>
> sd_init_command: disk=sda, block=871769260, count=26
> sda : block=871769260
> sda : reading 26/26 512 byte blocks.
> scsi_add_timer: scmd: f79ed980, time: 7500, (c02b1420)
> sd 0:1:0:0: send 0xf79ed980 sd 0:1:0:0:
> command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
> buffer = 0xf7cfb540, bufflen = 13312, done = 0xc0366b40,
> queuecommand 0xc0344010
> leaving scsi_dispatch_cmnd()
> scsi_delete_timer: scmd: f79ed980, rtn: 1
> sd 0:1:0:0: done 0xf79ed980 SUCCESS 0 sd 0:1:0:0:
> command: Read (10): 28 00 33 f6 24 ac 00 00 1a 00
> scsi host busy 1 failed 0
> sd 0:1:0:0: Notifying upper driver of completion (result 0)
> sd_rw_intr: sda: res=0x0
> 26 sectors total, 13312 bytes done.
> use_sg is 4
> attempt to access beyond end of device
> sda6: rw=0, want=1044134458, limit=951401367
> Buffer I/O error on device sda6, logical block 522067228
> attempt to access beyond end of device
2. When an INQUIRY with the EVPD bit set is issued to the MegaRAID controller,
system memory gets corrupted.
Root Cause: MegaRAID F/W handles the INQUIRY with the EVPD bit set
incorrectly.
Fix: MegaRAID F/W has fixed the problem and the fix will be released
soon. Meanwhile, the driver will filter out such requests.
3. One member of a driver data structure caused an unaligned-access
issue on 64-bit platforms.
Customer reported a "kernel unaligned access address" issue when an
application communicates with the MegaRAID HBA driver.
Root Cause: in the uioc_t structure, one member was misaligned, which
led the system to display the error message.
Fix: A patch was submitted to the community by the following contributor.
> -----Original Message-----
> From: linux-scsi-owner@vger.kernel.org
> [mailto:linux-scsi-owner@vger.kernel.org] On Behalf Of Sakurai Hiroomi
> Sent: Wednesday, July 12, 2006 4:20 AM
> To: linux-scsi@vger.kernel.org; linux-kernel@vger.kernel.org
> Subject: Re: Help: strange messages from kernel on IA64 platform
>
> Hi,
>
> I saw same message.
>
> When GAM(Global Array Manager) is started, The following
> message output.
> kernel: kernel unaligned access to 0xe0000001fe1080d4,
> ip=0xa000000200053371
>
> The uioc structure used by ioctl is defined by packed,
> the allignment of each member are disturbed.
> In a 64 bit structure, the allignment of member doesn't fit 64 bit
> boundary. this causes this messages.
> In a 32 bit structure, we don't see the message because the allinment
> of member fit 32 bit boundary even if packed is specified.
>
> patch
> I Add 32 bit dummy member to fit 64 bit boundary. I tested.
> We confirmed this patch fix the problem by IA64 server.
>
> **************************************************************
> ****************
> --- linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h.orig
> 2006-04-03 17:13:03.000000000 +0900
> +++ linux-2.6.9/drivers/scsi/megaraid/megaraid_ioctl.h
> 2006-04-03 17:14:09.000000000 +0900
> @@ -132,6 +132,10 @@
> /* Driver Data: */
> void __user * user_data;
> uint32_t user_data_len;
> +
> + /* 64bit alignment */
> + uint32_t pad_0xBC;
> +
> mraid_passthru_t __user *user_pthru;
>
> mraid_passthru_t *pthru32;
> **************************************************************
> ****************
Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju <sju@lsil.com>
Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)

View file

@@ -25,6 +25,7 @@ Currently, these files are in /proc/sys/fs:
- inode-state
- overflowuid
- overflowgid
- suid_dumpable
- super-max
- super-nr
@@ -131,6 +132,25 @@ The default is 65534.
==============================================================
suid_dumpable:
This value can be used to query and set the core dump mode for setuid
or otherwise protected/tainted binaries. The modes are
0 - (default) - traditional behaviour. Any process which has changed
privilege levels or is execute only will not be dumped
1 - (debug) - all processes dump core when possible. The core dump is
owned by the current user and no security is applied. This is
intended for system debugging situations only. Ptrace is unchecked.
2 - (suidsafe) - any binary which normally would not be dumped is dumped
readable by root only. This allows the end user to remove
such a dump but not access it directly. For security reasons
core dumps in this mode will not overwrite one another or
other files. This mode is appropriate when administrators are
attempting to debug problems in a normal environment.
==============================================================
super-max & super-nr:
These numbers control the maximum number of superblocks, and

View file

@@ -50,7 +50,6 @@ show up in /proc/sys/kernel:
- shmmax [ sysv ipc ]
- shmmni
- stop-a [ SPARC only ]
- suid_dumpable
- sysrq ==> Documentation/sysrq.txt
- tainted
- threads-max
@@ -310,25 +309,6 @@ kernel. This value defaults to SHMMAX.
==============================================================
suid_dumpable:
This value can be used to query and set the core dump mode for setuid
or otherwise protected/tainted binaries. The modes are
0 - (default) - traditional behaviour. Any process which has changed
privilege levels or is execute only will not be dumped
1 - (debug) - all processes dump core when possible. The core dump is
owned by the current user and no security is applied. This is
intended for system debugging situations only. Ptrace is unchecked.
2 - (suidsafe) - any binary which normally would not be dumped is dumped
readable by root only. This allows the end user to remove
such a dump but not access it directly. For security reasons
core dumps in this mode will not overwrite one another or
other files. This mode is appropriate when administrators are
attempting to debug problems in a normal environment.
==============================================================
tainted:
Non-zero if the kernel has been tainted. Numeric values, which

View file

@@ -889,6 +889,12 @@ M: rdunlap@xenotime.net
T: git http://tali.admingilde.org/git/linux-docbook.git
S: Maintained
DOCKING STATION DRIVER
P: Kristen Carlson Accardi
M: kristen.c.accardi@intel.com
L: linux-acpi@vger.kernel.org
S: Maintained
DOUBLETALK DRIVER
P: James R. Van Zandt
M: jrv@vanzandt.mv.com

View file

@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 18
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME=Crazed Snow-Weasel
# *DOCUMENTATION*

View file

@@ -179,17 +179,19 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
struct safe_buffer *b = NULL;
struct safe_buffer *b, *rb = NULL;
unsigned long flags;
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
if (b->safe_dma_addr == safe_dma_addr)
if (b->safe_dma_addr == safe_dma_addr) {
rb = b;
break;
}
read_unlock_irqrestore(&device_info->lock, flags);
return b;
return rb;
}
static inline void

View file

@@ -634,6 +634,14 @@ ENTRY(__switch_to)
* purpose.
*/
.macro usr_ret, reg
#ifdef CONFIG_ARM_THUMB
bx \reg
#else
mov pc, \reg
#endif
.endm
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -675,7 +683,7 @@ __kuser_memory_barrier: @ 0xffff0fa0
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
mov pc, lr
usr_ret lr
.align 5
@@ -778,7 +786,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
mov r0, #-1
adds r0, r0, #0
#endif
mov pc, lr
usr_ret lr
#else
@@ -792,7 +800,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
#ifdef CONFIG_SMP
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
mov pc, lr
usr_ret lr
#endif
@@ -834,16 +842,11 @@ __kuser_cmpxchg: @ 0xffff0fc0
__kuser_get_tls: @ 0xffff0fe0
#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0
mov pc, lr
#else
mrc p15, 0, r0, c13, c0, 3 @ read TLS register
mov pc, lr
#endif
usr_ret lr
.rep 5
.word 0 @ pad up to __kuser_helper_version

View file

@@ -118,7 +118,7 @@ ENTRY(secondary_startup)
sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r7, r4] @ get secondary_data.pgdir
adr lr, __enable_mmu @ return address
add pc, r10, #12 @ initialise processor
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
@ (return control reg)
/*

View file

@@ -10,45 +10,47 @@ obj-m :=
obj-n :=
obj- :=
# DMA
obj-$(CONFIG_S3C2410_DMA) += dma.o
# S3C2400 support files
obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
obj-$(CONFIG_CPU_S3C2400) += s3c2400-gpio.o
# S3C2410 support files
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
obj-$(CONFIG_S3C2410_DMA) += dma.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
obj-$(CONFIG_CPU_S3C2410) += s3c2410-gpio.o
# Power Management support
obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
obj-$(CONFIG_PM) += pm.o sleep.o
obj-$(CONFIG_PM_SIMTEC) += pm-simtec.o
# S3C2412 support
obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412.o
obj-$(CONFIG_CPU_S3C2412) += s3c2412-clock.o
#
# S3C244X support
obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x.o
obj-$(CONFIG_CPU_S3C244X) += s3c244x-irq.o
# Clock control
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
# S3C2440 support
obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
obj-$(CONFIG_CPU_S3C2440) += s3c2410-gpio.o
# S3C2442 support
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += s3c2442-clock.o
# bast extras

View file

@@ -112,7 +112,7 @@ dmadbg_capture(s3c2410_dma_chan_t *chan, struct s3c2410_dma_regstate *regs)
}
static void
dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
dmadbg_dumpregs(const char *fname, int line, s3c2410_dma_chan_t *chan,
struct s3c2410_dma_regstate *regs)
{
printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
@@ -132,7 +132,16 @@ dmadbg_showchan(const char *fname, int line, s3c2410_dma_chan_t *chan)
chan->number, fname, line, chan->load_state,
chan->curr, chan->next, chan->end);
dmadbg_showregs(fname, line, chan, &state);
dmadbg_dumpregs(fname, line, chan, &state);
}
static void
dmadbg_showregs(const char *fname, int line, s3c2410_dma_chan_t *chan)
{
struct s3c2410_dma_regstate state;
dmadbg_capture(chan, &state);
dmadbg_dumpregs(fname, line, chan, &state);
}
#define dbg_showregs(chan) dmadbg_showregs(__FUNCTION__, __LINE__, (chan))
@ -253,10 +262,14 @@ s3c2410_dma_loadbuffer(s3c2410_dma_chan_t *chan,
buf->next);
reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
} else {
pr_debug("load_state is %d => autoreload\n", chan->load_state);
//pr_debug("load_state is %d => autoreload\n", chan->load_state);
reload = S3C2410_DCON_AUTORELOAD;
}
if ((buf->data & 0xf0000000) != 0x30000000) {
dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
}
writel(buf->data, chan->addr_reg);
dma_wrreg(chan, S3C2410_DMA_DCON,
@ -370,7 +383,7 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
tmp |= S3C2410_DMASKTRIG_ON;
dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
pr_debug("wrote %08lx to DMASKTRIG\n", tmp);
pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);
#if 0
/* the dma buffer loads should take care of clearing the AUTO
@ -384,7 +397,30 @@ static int s3c2410_dma_start(s3c2410_dma_chan_t *chan)
dbg_showchan(chan);
/* if we've only loaded one buffer onto the channel, then check
* to see if we have another, and if so, try and load it so when
* the first buffer is finished, the new one will be loaded onto
* the channel */
if (chan->next != NULL) {
if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
pr_debug("%s: buff not yet loaded, no more todo\n",
__FUNCTION__);
} else {
chan->load_state = S3C2410_DMALOAD_1RUNNING;
s3c2410_dma_loadbuffer(chan, chan->next);
}
} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
s3c2410_dma_loadbuffer(chan, chan->next);
}
}
local_irq_restore(flags);
return 0;
}
@ -436,12 +472,11 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
if (buf == NULL) {
pr_debug("%s: out of memory (%ld alloc)\n",
__FUNCTION__, sizeof(*buf));
__FUNCTION__, (long)sizeof(*buf));
return -ENOMEM;
}
pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
//pr_debug("%s: new buffer %p\n", __FUNCTION__, buf);
//dbg_showchan(chan);
buf->next = NULL;
@ -537,14 +572,20 @@ s3c2410_dma_lastxfer(s3c2410_dma_chan_t *chan)
case S3C2410_DMALOAD_1LOADED:
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
/* flag error? */
printk(KERN_ERR "dma%d: timeout waiting for load\n",
chan->number);
printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
chan->number, __FUNCTION__);
return;
}
break;
case S3C2410_DMALOAD_1LOADED_1RUNNING:
/* I believe in this case we do not have anything to do
* until the next buffer comes along, and we turn off the
* reload */
return;
default:
pr_debug("dma%d: lastxfer: unhandled load_state %d with no next",
pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
chan->number, chan->load_state);
return;
@ -629,7 +670,14 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
} else {
}
if (chan->next != NULL) {
/* only reload if the channel is still running... our buffer done
* routine may have altered the state by requesting the dma channel
* to stop or shutdown... */
/* todo: check that when the channel is shut-down from inside this
* function, we cope with unsetting reload, etc */
if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
unsigned long flags;
switch (chan->load_state) {
@ -644,8 +692,8 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
case S3C2410_DMALOAD_1LOADED:
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
/* flag error? */
printk(KERN_ERR "dma%d: timeout waiting for load\n",
chan->number);
printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
chan->number, __FUNCTION__);
return IRQ_HANDLED;
}
@ -678,8 +726,6 @@ s3c2410_dma_irq(int irq, void *devpw, struct pt_regs *regs)
return IRQ_HANDLED;
}
/* s3c2410_request_dma
*
* get control of a dma channel
@ -718,11 +764,17 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
pr_debug("dma%d: %s : requesting irq %d\n",
channel, __FUNCTION__, chan->irq);
chan->irq_claimed = 1;
local_irq_restore(flags);
err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
client->name, (void *)chan);
local_irq_save(flags);
if (err) {
chan->in_use = 0;
chan->irq_claimed = 0;
local_irq_restore(flags);
printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
@ -730,7 +782,6 @@ int s3c2410_dma_request(unsigned int channel, s3c2410_dma_client_t *client,
return err;
}
chan->irq_claimed = 1;
chan->irq_enabled = 1;
}
@ -810,6 +861,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
tmp |= S3C2410_DMASKTRIG_STOP;
//tmp &= ~S3C2410_DMASKTRIG_ON;
dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);
#if 0
@ -819,6 +871,7 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
#endif
/* should stop do this, or should we wait for flush? */
chan->state = S3C2410_DMA_IDLE;
chan->load_state = S3C2410_DMALOAD_NONE;
@ -827,6 +880,22 @@ static int s3c2410_dma_dostop(s3c2410_dma_chan_t *chan)
return 0;
}
void s3c2410_dma_waitforstop(s3c2410_dma_chan_t *chan)
{
unsigned long tmp;
unsigned int timeout = 0x10000;
while (timeout-- > 0) {
tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
if (!(tmp & S3C2410_DMASKTRIG_ON))
return;
}
pr_debug("dma%d: failed to stop?\n", chan->number);
}
/* s3c2410_dma_flush
*
* stop the channel, and remove all current and pending transfers
@ -837,7 +906,9 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
s3c2410_dma_buf_t *buf, *next;
unsigned long flags;
pr_debug("%s:\n", __FUNCTION__);
pr_debug("%s: chan %p (%d)\n", __FUNCTION__, chan, chan->number);
dbg_showchan(chan);
local_irq_save(flags);
@ -864,11 +935,64 @@ static int s3c2410_dma_flush(s3c2410_dma_chan_t *chan)
}
}
dbg_showregs(chan);
s3c2410_dma_waitforstop(chan);
#if 0
/* should also clear interrupts, according to WinCE BSP */
{
unsigned long tmp;
tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
tmp |= S3C2410_DCON_NORELOAD;
dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
}
#endif
dbg_showregs(chan);
local_irq_restore(flags);
return 0;
}
int
s3c2410_dma_started(s3c2410_dma_chan_t *chan)
{
unsigned long flags;
local_irq_save(flags);
dbg_showchan(chan);
/* if we've only loaded one buffer onto the channel, then check
* to see if we have another, and if so, try and load it so when
* the first buffer is finished, the new one will be loaded onto
* the channel */
if (chan->next != NULL) {
if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
pr_debug("%s: buff not yet loaded, no more todo\n",
__FUNCTION__);
} else {
chan->load_state = S3C2410_DMALOAD_1RUNNING;
s3c2410_dma_loadbuffer(chan, chan->next);
}
} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
s3c2410_dma_loadbuffer(chan, chan->next);
}
}
local_irq_restore(flags);
return 0;
}
int
s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
@ -885,14 +1009,15 @@ s3c2410_dma_ctrl(dmach_t channel, s3c2410_chan_op_t op)
return s3c2410_dma_dostop(chan);
case S3C2410_DMAOP_PAUSE:
return -ENOENT;
case S3C2410_DMAOP_RESUME:
return -ENOENT;
case S3C2410_DMAOP_FLUSH:
return s3c2410_dma_flush(chan);
case S3C2410_DMAOP_STARTED:
return s3c2410_dma_started(chan);
case S3C2410_DMAOP_TIMEOUT:
return 0;


@ -285,7 +285,7 @@ static struct flash_platform_data versatile_flash_data = {
static struct resource versatile_flash_resource = {
.start = VERSATILE_FLASH_BASE,
.end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE,
.end = VERSATILE_FLASH_BASE + VERSATILE_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
};


@ -142,6 +142,7 @@ config X86_SUMMIT
In particular, it is needed for the x440.
If you don't have one of these computers, you should say N here.
If you want to build a NUMA kernel, you must select ACPI.
config X86_BIGSMP
bool "Support for other sub-arch SMP systems with more than 8 CPUs"
@ -169,6 +170,7 @@ config X86_GENERICARCH
help
This option compiles in the Summit, bigsmp, ES7000, default subarchitectures.
It is intended for a generic binary kernel.
If you want a NUMA kernel, select ACPI. We need SRAT for NUMA.
config X86_ES7000
bool "Support for Unisys ES7000 IA32 series"
@ -542,7 +544,7 @@ config X86_PAE
# Common NUMA Features
config NUMA
bool "Numa Memory Allocation and Scheduler Support"
depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
depends on SMP && HIGHMEM64G && (X86_NUMAQ || (X86_SUMMIT || X86_GENERICARCH) && ACPI)
default n if X86_PC
default y if (X86_NUMAQ || X86_SUMMIT)


@ -59,7 +59,7 @@ static inline int gsi_irq_sharing(int gsi) { return gsi; }
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length != sizeof(*entry))
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "


@ -292,7 +292,10 @@ ENTRY(do_suspend_lowlevel)
pushl $3
call acpi_enter_sleep_state
addl $4, %esp
ret
# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover
jmp ret_point
.p2align 4,,7
ret_point:
call restore_registers


@ -567,16 +567,11 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
static int __init
acpi_cpufreq_init (void)
{
int result = 0;
dprintk("acpi_cpufreq_init\n");
result = acpi_cpufreq_early_init_acpi();
acpi_cpufreq_early_init_acpi();
if (!result)
result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
return cpufreq_register_driver(&acpi_cpufreq_driver);
}


@ -14,8 +14,12 @@ static __init int pci_access_init(void)
#ifdef CONFIG_PCI_BIOS
pci_pcbios_init();
#endif
if (raw_pci_ops)
return 0;
/*
* don't check for raw_pci_ops here because we want pcbios as last
* fallback, yet it's needed to run first to set pcibios_last_bus
* in case legacy PCI probing is used. otherwise detecting peer busses
* fails.
*/
#ifdef CONFIG_PCI_DIRECT
pci_direct_init();
#endif


@ -178,7 +178,7 @@ static __init void unreachable_devices(void)
pci_exp_set_dev_base(addr, k, PCI_DEVFN(i, 0));
if (addr == 0 ||
readl((u32 __iomem *)mmcfg_virt_addr) != val1) {
set_bit(i, fallback_slots);
set_bit(i + 32*k, fallback_slots);
printk(KERN_NOTICE
"PCI: No mmconfig possible on %x:%x\n", k, i);
}


@ -244,7 +244,8 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
if (scatterlen == 0)
memcpy(sc->request_buffer, buf, len);
else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
else for (slp = (struct scatterlist *)sc->request_buffer;
scatterlen-- > 0 && len > 0; slp++) {
unsigned thislen = min(len, slp->length);
memcpy(page_address(slp->page) + slp->offset, buf, thislen);


@ -55,7 +55,7 @@
#define BAD_MADT_ENTRY(entry, end) ( \
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((acpi_table_entry_header *)entry)->length != sizeof(*entry))
((acpi_table_entry_header *)entry)->length < sizeof(*entry))
#define PREFIX "ACPI: "


@ -0,0 +1,257 @@
/*
* MPC8540 ADS Device Tree Source
*
* Copyright 2006 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/ {
model = "MPC8540ADS";
compatible = "MPC85xxADS";
#address-cells = <1>;
#size-cells = <1>;
linux,phandle = <100>;
cpus {
#cpus = <1>;
#address-cells = <1>;
#size-cells = <0>;
linux,phandle = <200>;
PowerPC,8540@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <20>; // 32 bytes
i-cache-line-size = <20>; // 32 bytes
d-cache-size = <8000>; // L1, 32K
i-cache-size = <8000>; // L1, 32K
timebase-frequency = <0>; // 33 MHz, from uboot
bus-frequency = <0>; // 166 MHz
clock-frequency = <0>; // 825 MHz, from uboot
32-bit;
linux,phandle = <201>;
};
};
memory {
device_type = "memory";
linux,phandle = <300>;
reg = <00000000 08000000>; // 128M at 0x0
};
soc8540@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
ranges = <0 e0000000 00100000>;
reg = <e0000000 00100000>; // CCSRBAR 1M
bus-frequency = <0>;
i2c@3000 {
device_type = "i2c";
compatible = "fsl-i2c";
reg = <3000 100>;
interrupts = <1b 2>;
interrupt-parent = <40000>;
dfsrr;
};
mdio@24520 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "mdio";
compatible = "gianfar";
reg = <24520 20>;
linux,phandle = <24520>;
ethernet-phy@0 {
linux,phandle = <2452000>;
interrupt-parent = <40000>;
interrupts = <35 1>;
reg = <0>;
device_type = "ethernet-phy";
};
ethernet-phy@1 {
linux,phandle = <2452001>;
interrupt-parent = <40000>;
interrupts = <35 1>;
reg = <1>;
device_type = "ethernet-phy";
};
ethernet-phy@3 {
linux,phandle = <2452003>;
interrupt-parent = <40000>;
interrupts = <37 1>;
reg = <3>;
device_type = "ethernet-phy";
};
};
ethernet@24000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
address = [ 00 E0 0C 00 73 00 ];
local-mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 2 e 2 12 2>;
interrupt-parent = <40000>;
phy-handle = <2452000>;
};
ethernet@25000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <25000 1000>;
address = [ 00 E0 0C 00 73 01 ];
local-mac-address = [ 00 E0 0C 00 73 01 ];
interrupts = <13 2 14 2 18 2>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
};
ethernet@26000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "FEC";
compatible = "gianfar";
reg = <26000 1000>;
address = [ 00 E0 0C 00 73 02 ];
local-mac-address = [ 00 E0 0C 00 73 02 ];
interrupts = <19 2>;
interrupt-parent = <40000>;
phy-handle = <2452003>;
};
serial@4500 {
device_type = "serial";
compatible = "ns16550";
reg = <4500 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
serial@4600 {
device_type = "serial";
compatible = "ns16550";
reg = <4600 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
pci@8000 {
linux,phandle = <8000>;
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x02 */
1000 0 0 1 40000 31 1
1000 0 0 2 40000 32 1
1000 0 0 3 40000 33 1
1000 0 0 4 40000 34 1
/* IDSEL 0x03 */
1800 0 0 1 40000 34 1
1800 0 0 2 40000 31 1
1800 0 0 3 40000 32 1
1800 0 0 4 40000 33 1
/* IDSEL 0x04 */
2000 0 0 1 40000 33 1
2000 0 0 2 40000 34 1
2000 0 0 3 40000 31 1
2000 0 0 4 40000 32 1
/* IDSEL 0x05 */
2800 0 0 1 40000 32 1
2800 0 0 2 40000 33 1
2800 0 0 3 40000 34 1
2800 0 0 4 40000 31 1
/* IDSEL 0x0c */
6000 0 0 1 40000 31 1
6000 0 0 2 40000 32 1
6000 0 0 3 40000 33 1
6000 0 0 4 40000 34 1
/* IDSEL 0x0d */
6800 0 0 1 40000 34 1
6800 0 0 2 40000 31 1
6800 0 0 3 40000 32 1
6800 0 0 4 40000 33 1
/* IDSEL 0x0e */
7000 0 0 1 40000 33 1
7000 0 0 2 40000 34 1
7000 0 0 3 40000 31 1
7000 0 0 4 40000 32 1
/* IDSEL 0x0f */
7800 0 0 1 40000 32 1
7800 0 0 2 40000 33 1
7800 0 0 3 40000 34 1
7800 0 0 4 40000 31 1
/* IDSEL 0x12 */
9000 0 0 1 40000 31 1
9000 0 0 2 40000 32 1
9000 0 0 3 40000 33 1
9000 0 0 4 40000 34 1
/* IDSEL 0x13 */
9800 0 0 1 40000 34 1
9800 0 0 2 40000 31 1
9800 0 0 3 40000 32 1
9800 0 0 4 40000 33 1
/* IDSEL 0x14 */
a000 0 0 1 40000 33 1
a000 0 0 2 40000 34 1
a000 0 0 3 40000 31 1
a000 0 0 4 40000 32 1
/* IDSEL 0x15 */
a800 0 0 1 40000 32 1
a800 0 0 2 40000 33 1
a800 0 0 3 40000 34 1
a800 0 0 4 40000 31 1>;
interrupt-parent = <40000>;
interrupts = <08 2>;
bus-range = <0 0>;
ranges = <02000000 0 80000000 80000000 0 20000000
01000000 0 00000000 e2000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
compatible = "85xx";
device_type = "pci";
};
pic@40000 {
linux,phandle = <40000>;
clock-frequency = <0>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <40000 40000>;
built-in;
compatible = "chrp,open-pic";
device_type = "open-pic";
big-endian;
};
};
};


@ -0,0 +1,244 @@
/*
* MPC8541 CDS Device Tree Source
*
* Copyright 2006 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/ {
model = "MPC8541CDS";
compatible = "MPC85xxCDS";
#address-cells = <1>;
#size-cells = <1>;
linux,phandle = <100>;
cpus {
#cpus = <1>;
#address-cells = <1>;
#size-cells = <0>;
linux,phandle = <200>;
PowerPC,8541@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <20>; // 32 bytes
i-cache-line-size = <20>; // 32 bytes
d-cache-size = <8000>; // L1, 32K
i-cache-size = <8000>; // L1, 32K
timebase-frequency = <0>; // 33 MHz, from uboot
bus-frequency = <0>; // 166 MHz
clock-frequency = <0>; // 825 MHz, from uboot
32-bit;
linux,phandle = <201>;
};
};
memory {
device_type = "memory";
linux,phandle = <300>;
reg = <00000000 08000000>; // 128M at 0x0
};
soc8541@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
ranges = <0 e0000000 00100000>;
reg = <e0000000 00100000>; // CCSRBAR 1M
bus-frequency = <0>;
i2c@3000 {
device_type = "i2c";
compatible = "fsl-i2c";
reg = <3000 100>;
interrupts = <1b 2>;
interrupt-parent = <40000>;
dfsrr;
};
mdio@24520 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "mdio";
compatible = "gianfar";
reg = <24520 20>;
linux,phandle = <24520>;
ethernet-phy@0 {
linux,phandle = <2452000>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <0>;
device_type = "ethernet-phy";
};
ethernet-phy@1 {
linux,phandle = <2452001>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <1>;
device_type = "ethernet-phy";
};
};
ethernet@24000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
local-mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 2 e 2 12 2>;
interrupt-parent = <40000>;
phy-handle = <2452000>;
};
ethernet@25000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <25000 1000>;
local-mac-address = [ 00 E0 0C 00 73 01 ];
interrupts = <13 2 14 2 18 2>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
};
serial@4500 {
device_type = "serial";
compatible = "ns16550";
reg = <4500 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
serial@4600 {
device_type = "serial";
compatible = "ns16550";
reg = <4600 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
pci@8000 {
linux,phandle = <8000>;
interrupt-map-mask = <1f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x10 */
08000 0 0 1 40000 30 1
08000 0 0 2 40000 31 1
08000 0 0 3 40000 32 1
08000 0 0 4 40000 33 1
/* IDSEL 0x11 */
08800 0 0 1 40000 30 1
08800 0 0 2 40000 31 1
08800 0 0 3 40000 32 1
08800 0 0 4 40000 33 1
/* IDSEL 0x12 (Slot 1) */
09000 0 0 1 40000 30 1
09000 0 0 2 40000 31 1
09000 0 0 3 40000 32 1
09000 0 0 4 40000 33 1
/* IDSEL 0x13 (Slot 2) */
09800 0 0 1 40000 31 1
09800 0 0 2 40000 32 1
09800 0 0 3 40000 33 1
09800 0 0 4 40000 30 1
/* IDSEL 0x14 (Slot 3) */
0a000 0 0 1 40000 32 1
0a000 0 0 2 40000 33 1
0a000 0 0 3 40000 30 1
0a000 0 0 4 40000 31 1
/* IDSEL 0x15 (Slot 4) */
0a800 0 0 1 40000 33 1
0a800 0 0 2 40000 30 1
0a800 0 0 3 40000 31 1
0a800 0 0 4 40000 32 1
/* Bus 1 (Tundra Bridge) */
/* IDSEL 0x12 (ISA bridge) */
19000 0 0 1 40000 30 1
19000 0 0 2 40000 31 1
19000 0 0 3 40000 32 1
19000 0 0 4 40000 33 1>;
interrupt-parent = <40000>;
interrupts = <08 2>;
bus-range = <0 0>;
ranges = <02000000 0 80000000 80000000 0 20000000
01000000 0 00000000 e2000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
compatible = "85xx";
device_type = "pci";
i8259@19000 {
clock-frequency = <0>;
interrupt-controller;
device_type = "interrupt-controller";
reg = <19000 0 0 0 1>;
#address-cells = <0>;
#interrupt-cells = <2>;
built-in;
compatible = "chrp,iic";
big-endian;
interrupts = <1>;
interrupt-parent = <8000>;
};
};
pci@9000 {
linux,phandle = <9000>;
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x15 */
a800 0 0 1 40000 3b 1
a800 0 0 2 40000 3b 1
a800 0 0 3 40000 3b 1
a800 0 0 4 40000 3b 1>;
interrupt-parent = <40000>;
interrupts = <09 2>;
bus-range = <0 0>;
ranges = <02000000 0 a0000000 a0000000 0 20000000
01000000 0 00000000 e3000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <9000 1000>;
compatible = "85xx";
device_type = "pci";
};
pic@40000 {
linux,phandle = <40000>;
clock-frequency = <0>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <40000 40000>;
built-in;
compatible = "chrp,open-pic";
device_type = "open-pic";
big-endian;
};
};
};


@ -0,0 +1,287 @@
/*
* MPC8548 CDS Device Tree Source
*
* Copyright 2006 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/ {
model = "MPC8548CDS";
compatible = "MPC85xxCDS";
#address-cells = <1>;
#size-cells = <1>;
linux,phandle = <100>;
cpus {
#cpus = <1>;
#address-cells = <1>;
#size-cells = <0>;
linux,phandle = <200>;
PowerPC,8548@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <20>; // 32 bytes
i-cache-line-size = <20>; // 32 bytes
d-cache-size = <8000>; // L1, 32K
i-cache-size = <8000>; // L1, 32K
timebase-frequency = <0>; // 33 MHz, from uboot
bus-frequency = <0>; // 166 MHz
clock-frequency = <0>; // 825 MHz, from uboot
32-bit;
linux,phandle = <201>;
};
};
memory {
device_type = "memory";
linux,phandle = <300>;
reg = <00000000 08000000>; // 128M at 0x0
};
soc8548@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
ranges = <0 e0000000 00100000>;
reg = <e0000000 00100000>; // CCSRBAR 1M
bus-frequency = <0>;
i2c@3000 {
device_type = "i2c";
compatible = "fsl-i2c";
reg = <3000 100>;
interrupts = <1b 2>;
interrupt-parent = <40000>;
dfsrr;
};
mdio@24520 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "mdio";
compatible = "gianfar";
reg = <24520 20>;
linux,phandle = <24520>;
ethernet-phy@0 {
linux,phandle = <2452000>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <0>;
device_type = "ethernet-phy";
};
ethernet-phy@1 {
linux,phandle = <2452001>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <1>;
device_type = "ethernet-phy";
};
ethernet-phy@2 {
linux,phandle = <2452002>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <2>;
device_type = "ethernet-phy";
};
ethernet-phy@3 {
linux,phandle = <2452003>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <3>;
device_type = "ethernet-phy";
};
};
ethernet@24000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "eTSEC";
compatible = "gianfar";
reg = <24000 1000>;
local-mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <d 2 e 2 12 2>;
interrupt-parent = <40000>;
phy-handle = <2452000>;
};
ethernet@25000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "eTSEC";
compatible = "gianfar";
reg = <25000 1000>;
local-mac-address = [ 00 E0 0C 00 73 01 ];
interrupts = <13 2 14 2 18 2>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
};
ethernet@26000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "eTSEC";
compatible = "gianfar";
reg = <26000 1000>;
local-mac-address = [ 00 E0 0C 00 73 02 ];
interrupts = <f 2 10 2 11 2>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
};
/* eTSEC 4 is currently broken
ethernet@27000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "eTSEC";
compatible = "gianfar";
reg = <27000 1000>;
local-mac-address = [ 00 E0 0C 00 73 03 ];
interrupts = <15 2 16 2 17 2>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
};
*/
serial@4500 {
device_type = "serial";
compatible = "ns16550";
reg = <4500 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
serial@4600 {
device_type = "serial";
compatible = "ns16550";
reg = <4600 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
pci@8000 {
linux,phandle = <8000>;
interrupt-map-mask = <1f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x10 */
08000 0 0 1 40000 30 1
08000 0 0 2 40000 31 1
08000 0 0 3 40000 32 1
08000 0 0 4 40000 33 1
/* IDSEL 0x11 */
08800 0 0 1 40000 30 1
08800 0 0 2 40000 31 1
08800 0 0 3 40000 32 1
08800 0 0 4 40000 33 1
/* IDSEL 0x12 (Slot 1) */
09000 0 0 1 40000 30 1
09000 0 0 2 40000 31 1
09000 0 0 3 40000 32 1
09000 0 0 4 40000 33 1
/* IDSEL 0x13 (Slot 2) */
09800 0 0 1 40000 31 1
09800 0 0 2 40000 32 1
09800 0 0 3 40000 33 1
09800 0 0 4 40000 30 1
/* IDSEL 0x14 (Slot 3) */
0a000 0 0 1 40000 32 1
0a000 0 0 2 40000 33 1
0a000 0 0 3 40000 30 1
0a000 0 0 4 40000 31 1
/* IDSEL 0x15 (Slot 4) */
0a800 0 0 1 40000 33 1
0a800 0 0 2 40000 30 1
0a800 0 0 3 40000 31 1
0a800 0 0 4 40000 32 1
/* Bus 1 (Tundra Bridge) */
/* IDSEL 0x12 (ISA bridge) */
19000 0 0 1 40000 30 1
19000 0 0 2 40000 31 1
19000 0 0 3 40000 32 1
19000 0 0 4 40000 33 1>;
interrupt-parent = <40000>;
interrupts = <08 2>;
bus-range = <0 0>;
ranges = <02000000 0 80000000 80000000 0 20000000
01000000 0 00000000 e2000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
compatible = "85xx";
device_type = "pci";
i8259@19000 {
clock-frequency = <0>;
interrupt-controller;
device_type = "interrupt-controller";
reg = <19000 0 0 0 1>;
#address-cells = <0>;
#interrupt-cells = <2>;
built-in;
compatible = "chrp,iic";
big-endian;
interrupts = <1>;
interrupt-parent = <8000>;
};
};
pci@9000 {
linux,phandle = <9000>;
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x15 */
a800 0 0 1 40000 3b 1
a800 0 0 2 40000 3b 1
a800 0 0 3 40000 3b 1
a800 0 0 4 40000 3b 1>;
interrupt-parent = <40000>;
interrupts = <09 2>;
bus-range = <0 0>;
ranges = <02000000 0 a0000000 a0000000 0 20000000
01000000 0 00000000 e3000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <9000 1000>;
compatible = "85xx";
device_type = "pci";
};
pic@40000 {
linux,phandle = <40000>;
clock-frequency = <0>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <40000 40000>;
built-in;
compatible = "chrp,open-pic";
device_type = "open-pic";
big-endian;
};
};
};


@ -0,0 +1,244 @@
/*
* MPC8555 CDS Device Tree Source
*
* Copyright 2006 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/ {
model = "MPC8555CDS";
compatible = "MPC85xxCDS";
#address-cells = <1>;
#size-cells = <1>;
linux,phandle = <100>;
cpus {
#cpus = <1>;
#address-cells = <1>;
#size-cells = <0>;
linux,phandle = <200>;
PowerPC,8555@0 {
device_type = "cpu";
reg = <0>;
d-cache-line-size = <20>; // 32 bytes
i-cache-line-size = <20>; // 32 bytes
d-cache-size = <8000>; // L1, 32K
i-cache-size = <8000>; // L1, 32K
timebase-frequency = <0>; // 33 MHz, from uboot
bus-frequency = <0>; // 166 MHz
clock-frequency = <0>; // 825 MHz, from uboot
32-bit;
linux,phandle = <201>;
};
};
memory {
device_type = "memory";
linux,phandle = <300>;
reg = <00000000 08000000>; // 128M at 0x0
};
soc8555@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
#interrupt-cells = <2>;
device_type = "soc";
ranges = <0 e0000000 00100000>;
reg = <e0000000 00100000>; // CCSRBAR 1M
bus-frequency = <0>;
i2c@3000 {
device_type = "i2c";
compatible = "fsl-i2c";
reg = <3000 100>;
interrupts = <1b 2>;
interrupt-parent = <40000>;
dfsrr;
};
mdio@24520 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "mdio";
compatible = "gianfar";
reg = <24520 20>;
linux,phandle = <24520>;
ethernet-phy@0 {
linux,phandle = <2452000>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <0>;
device_type = "ethernet-phy";
};
ethernet-phy@1 {
linux,phandle = <2452001>;
interrupt-parent = <40000>;
interrupts = <35 0>;
reg = <1>;
device_type = "ethernet-phy";
};
};
ethernet@24000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <24000 1000>;
local-mac-address = [ 00 E0 0C 00 73 00 ];
interrupts = <0d 2 0e 2 12 2>;
interrupt-parent = <40000>;
phy-handle = <2452000>;
};
ethernet@25000 {
#address-cells = <1>;
#size-cells = <0>;
device_type = "network";
model = "TSEC";
compatible = "gianfar";
reg = <25000 1000>;
local-mac-address = [ 00 E0 0C 00 73 01 ];
interrupts = <13 2 14 2 18 2>;
interrupt-parent = <40000>;
phy-handle = <2452001>;
};
serial@4500 {
device_type = "serial";
compatible = "ns16550";
reg = <4500 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
serial@4600 {
device_type = "serial";
compatible = "ns16550";
reg = <4600 100>; // reg base, size
clock-frequency = <0>; // should we fill in in uboot?
interrupts = <1a 2>;
interrupt-parent = <40000>;
};
pci@8000 {
linux,phandle = <8000>;
interrupt-map-mask = <1f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x10 */
08000 0 0 1 40000 30 1
08000 0 0 2 40000 31 1
08000 0 0 3 40000 32 1
08000 0 0 4 40000 33 1
/* IDSEL 0x11 */
08800 0 0 1 40000 30 1
08800 0 0 2 40000 31 1
08800 0 0 3 40000 32 1
08800 0 0 4 40000 33 1
/* IDSEL 0x12 (Slot 1) */
09000 0 0 1 40000 30 1
09000 0 0 2 40000 31 1
09000 0 0 3 40000 32 1
09000 0 0 4 40000 33 1
/* IDSEL 0x13 (Slot 2) */
09800 0 0 1 40000 31 1
09800 0 0 2 40000 32 1
09800 0 0 3 40000 33 1
09800 0 0 4 40000 30 1
/* IDSEL 0x14 (Slot 3) */
0a000 0 0 1 40000 32 1
0a000 0 0 2 40000 33 1
0a000 0 0 3 40000 30 1
0a000 0 0 4 40000 31 1
/* IDSEL 0x15 (Slot 4) */
0a800 0 0 1 40000 33 1
0a800 0 0 2 40000 30 1
0a800 0 0 3 40000 31 1
0a800 0 0 4 40000 32 1
/* Bus 1 (Tundra Bridge) */
/* IDSEL 0x12 (ISA bridge) */
19000 0 0 1 40000 30 1
19000 0 0 2 40000 31 1
19000 0 0 3 40000 32 1
19000 0 0 4 40000 33 1>;
interrupt-parent = <40000>;
interrupts = <08 2>;
bus-range = <0 0>;
ranges = <02000000 0 80000000 80000000 0 20000000
01000000 0 00000000 e2000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <8000 1000>;
compatible = "85xx";
device_type = "pci";
i8259@19000 {
clock-frequency = <0>;
interrupt-controller;
device_type = "interrupt-controller";
reg = <19000 0 0 0 1>;
#address-cells = <0>;
#interrupt-cells = <2>;
built-in;
compatible = "chrp,iic";
big-endian;
interrupts = <1>;
interrupt-parent = <8000>;
};
};
pci@9000 {
linux,phandle = <9000>;
interrupt-map-mask = <f800 0 0 7>;
interrupt-map = <
/* IDSEL 0x15 */
a800 0 0 1 40000 3b 1
a800 0 0 2 40000 3b 1
a800 0 0 3 40000 3b 1
a800 0 0 4 40000 3b 1>;
interrupt-parent = <40000>;
interrupts = <09 2>;
bus-range = <0 0>;
ranges = <02000000 0 a0000000 a0000000 0 20000000
01000000 0 00000000 e3000000 0 00100000>;
clock-frequency = <3f940aa>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
reg = <9000 1000>;
compatible = "85xx";
device_type = "pci";
};
pic@40000 {
linux,phandle = <40000>;
clock-frequency = <0>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
reg = <40000 40000>;
built-in;
compatible = "chrp,open-pic";
device_type = "open-pic";
big-endian;
};
};
};


@ -115,6 +115,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
u64 addr;
u32 *addrp;
upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
struct device_node *tsi = of_get_parent(np);
/* We only support ports that have a clock frequency properly
* encoded in the device-tree.
@ -134,7 +135,10 @@ static int __init add_legacy_soc_port(struct device_node *np,
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
if (tsi && !strcmp(tsi->type, "tsi-bridge"))
return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0);
else
return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0);
}
static int __init add_legacy_isa_port(struct device_node *np,
@ -464,7 +468,7 @@ static int __init serial_dev_init(void)
fixup_port_irq(i, np, port);
if (port->iotype == UPIO_PORT)
fixup_port_pio(i, np, port);
if (port->iotype == UPIO_MEM)
if ((port->iotype == UPIO_MEM) || (port->iotype == UPIO_TSI))
fixup_port_mmio(i, np, port);
}


@ -598,11 +598,6 @@ static struct device_node *of_irq_find_parent(struct device_node *child)
return p;
}
static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
{
return (((pin - 1) + slot) % 4) + 1;
}
/* This doesn't need to be called if you don't have any special workaround
* flags to pass
*/
@ -891,6 +886,12 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
}
EXPORT_SYMBOL_GPL(of_irq_map_one);
#ifdef CONFIG_PCI
static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
{
return (((pin - 1) + slot) % 4) + 1;
}
int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
@ -967,4 +968,4 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
return of_irq_map_raw(ppnode, &lspec, laddr, out_irq);
}
EXPORT_SYMBOL_GPL(of_irq_map_pci);
#endif /* CONFIG_PCI */


@ -417,7 +417,7 @@ static __inline__ void timer_check_rtc(void)
/*
* This version of gettimeofday has microsecond resolution.
*/
static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
static inline void __do_gettimeofday(struct timeval *tv)
{
unsigned long sec, usec;
u64 tb_ticks, xsec;
@ -431,7 +431,12 @@ static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
* without a divide (and in fact, without a multiply)
*/
temp_varp = do_gtod.varp;
tb_ticks = tb_val - temp_varp->tb_orig_stamp;
/* Sampling the time base must be done after loading
* do_gtod.varp in order to avoid racing with update_gtod.
*/
data_barrier(temp_varp);
tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
temp_tb_to_xs = temp_varp->tb_to_xs;
temp_stamp_xsec = temp_varp->stamp_xsec;
xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
@ -464,7 +469,7 @@ void do_gettimeofday(struct timeval *tv)
tv->tv_usec = usec;
return;
}
__do_gettimeofday(tv, get_tb());
__do_gettimeofday(tv);
}
EXPORT_SYMBOL(do_gettimeofday);
@ -650,6 +655,7 @@ void timer_interrupt(struct pt_regs * regs)
int next_dec;
int cpu = smp_processor_id();
unsigned long ticks;
u64 tb_next_jiffy;
#ifdef CONFIG_PPC32
if (atomic_read(&ppc_n_lost_interrupts) != 0)
@ -691,11 +697,14 @@ void timer_interrupt(struct pt_regs * regs)
continue;
write_seqlock(&xtime_lock);
tb_last_jiffy += tb_ticks_per_jiffy;
tb_last_stamp = per_cpu(last_jiffy, cpu);
do_timer(regs);
timer_recalc_offset(tb_last_jiffy);
timer_check_rtc();
tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
tb_last_jiffy = tb_next_jiffy;
tb_last_stamp = per_cpu(last_jiffy, cpu);
do_timer(regs);
timer_recalc_offset(tb_last_jiffy);
timer_check_rtc();
}
write_sequnlock(&xtime_lock);
}


@ -585,14 +585,14 @@ static void parse_fpe(struct pt_regs *regs)
#define INST_MFSPR_PVR_MASK 0xfc1fffff
#define INST_DCBA 0x7c0005ec
#define INST_DCBA_MASK 0x7c0007fe
#define INST_DCBA_MASK 0xfc0007fe
#define INST_MCRXR 0x7c000400
#define INST_MCRXR_MASK 0x7c0007fe
#define INST_MCRXR_MASK 0xfc0007fe
#define INST_STRING 0x7c00042a
#define INST_STRING_MASK 0x7c0007fe
#define INST_STRING_GEN_MASK 0x7c00067e
#define INST_STRING_MASK 0xfc0007fe
#define INST_STRING_GEN_MASK 0xfc00067e
#define INST_LSWI 0x7c0004aa
#define INST_LSWX 0x7c00042a
#define INST_STSWI 0x7c0005aa


@ -153,7 +153,7 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
hpdp->pd = 0;
tlb->need_flush = 1;
pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
HUGEPTE_TABLE_SIZE-1));
PGF_CACHENUM_MASK));
}
#ifdef CONFIG_PPC_64K_PAGES


@ -14,7 +14,6 @@ config MPC8540_ADS
config MPC85xx_CDS
bool "Freescale MPC85xx CDS"
select DEFAULT_UIMAGE
select PPC_I8259 if PCI
help
This option enables support for the MPC85xx CDS board


@ -37,79 +37,7 @@ unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
#endif
/*
* Internal interrupts are all Level Sensitive, and Positive Polarity
*
* Note: Likely, this table and the following function should be
* obtained and derived from the OF Device Tree.
*/
static u_char mpc85xx_ads_openpic_initsenses[] __initdata = {
MPC85XX_INTERNAL_IRQ_SENSES,
0x0, /* External 0: */
#if defined(CONFIG_PCI)
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 4: PCI slot 3 */
#else
0x0, /* External 1: */
0x0, /* External 2: */
0x0, /* External 3: */
0x0, /* External 4: */
#endif
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
0x0, /* External 6: */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */
0x0, /* External 8: */
0x0, /* External 9: */
0x0, /* External 10: */
0x0, /* External 11: */
};
#ifdef CONFIG_PCI
/*
* interrupt routing
*/
int
mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
static char pci_irq_table[][4] =
/*
* This is a little evil, but works around the fact
* that revA boards have IDSEL starting at 18
* and other (older) boards start at 12
*
* PCI IDSEL/INTPIN->INTLINE
* A B C D
*/
{
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 2 */
{PIRQD, PIRQA, PIRQB, PIRQC},
{PIRQC, PIRQD, PIRQA, PIRQB},
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 5 */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 12 */
{PIRQD, PIRQA, PIRQB, PIRQC},
{PIRQC, PIRQD, PIRQA, PIRQB},
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 15 */
{0, 0, 0, 0}, /* -- */
{0, 0, 0, 0}, /* -- */
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 18 */
{PIRQD, PIRQA, PIRQB, PIRQC},
{PIRQC, PIRQD, PIRQA, PIRQB},
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 21 */
};
const long min_idsel = 2, max_idsel = 21, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP;
}
int
mpc85xx_exclude_device(u_char bus, u_char devfn)
{
@ -119,44 +47,63 @@ mpc85xx_exclude_device(u_char bus, u_char devfn)
return PCIBIOS_SUCCESSFUL;
}
void __init
mpc85xx_pcibios_fixup(void)
{
struct pci_dev *dev = NULL;
for_each_pci_dev(dev)
pci_read_irq_line(dev);
}
#endif /* CONFIG_PCI */
void __init mpc85xx_ads_pic_init(void)
{
struct mpic *mpic1;
phys_addr_t OpenPIC_PAddr;
struct mpic *mpic;
struct resource r;
struct device_node *np = NULL;
/* Determine the Physical Address of the OpenPIC regs */
OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET;
np = of_find_node_by_type(np, "open-pic");
mpic1 = mpic_alloc(OpenPIC_PAddr,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250,
mpc85xx_ads_openpic_initsenses,
sizeof(mpc85xx_ads_openpic_initsenses),
" OpenPIC ");
BUG_ON(mpic1 == NULL);
mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200);
mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280);
mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300);
mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380);
mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400);
mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480);
mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500);
mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580);
if (np == NULL) {
printk(KERN_ERR "Could not find open-pic node\n");
return;
}
/* dummy mappings to get to 48 */
mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600);
mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680);
mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700);
mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780);
if(of_address_to_resource(np, 0, &r)) {
printk(KERN_ERR "Could not map mpic register space\n");
of_node_put(np);
return;
}
/* External ints */
mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000);
mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080);
mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100);
mpic_init(mpic1);
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
4, 0, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);
mpic_assign_isu(mpic, 0, r.start + 0x10200);
mpic_assign_isu(mpic, 1, r.start + 0x10280);
mpic_assign_isu(mpic, 2, r.start + 0x10300);
mpic_assign_isu(mpic, 3, r.start + 0x10380);
mpic_assign_isu(mpic, 4, r.start + 0x10400);
mpic_assign_isu(mpic, 5, r.start + 0x10480);
mpic_assign_isu(mpic, 6, r.start + 0x10500);
mpic_assign_isu(mpic, 7, r.start + 0x10580);
/* Unused on this platform (leave room for 8548) */
mpic_assign_isu(mpic, 8, r.start + 0x10600);
mpic_assign_isu(mpic, 9, r.start + 0x10680);
mpic_assign_isu(mpic, 10, r.start + 0x10700);
mpic_assign_isu(mpic, 11, r.start + 0x10780);
/* External Interrupts */
mpic_assign_isu(mpic, 12, r.start + 0x10000);
mpic_assign_isu(mpic, 13, r.start + 0x10080);
mpic_assign_isu(mpic, 14, r.start + 0x10100);
mpic_init(mpic);
}
/*
@ -165,7 +112,9 @@ void __init mpc85xx_ads_pic_init(void)
static void __init mpc85xx_ads_setup_arch(void)
{
struct device_node *cpu;
#ifdef CONFIG_PCI
struct device_node *np;
#endif
if (ppc_md.progress)
ppc_md.progress("mpc85xx_ads_setup_arch()", 0);
@ -186,8 +135,7 @@ static void __init mpc85xx_ads_setup_arch(void)
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_map_irq = mpc85xx_map_irq;
ppc_md.pcibios_fixup = mpc85xx_pcibios_fixup;
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif


@ -57,94 +57,8 @@ unsigned long isa_mem_base = 0;
static int cds_pci_slot = 2;
static volatile u8 *cadmus;
/*
* Internal interrupts are all Level Sensitive, and Positive Polarity
*
* Note: Likely, this table and the following function should be
* obtained and derived from the OF Device Tree.
*/
static u_char mpc85xx_cds_openpic_initsenses[] __initdata = {
MPC85XX_INTERNAL_IRQ_SENSES,
#if defined(CONFIG_PCI)
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Ext 0: PCI slot 0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 1: PCI slot 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 2: PCI slot 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 3: PCI slot 3 */
#else
0x0, /* External 0: */
0x0, /* External 1: */
0x0, /* External 2: */
0x0, /* External 3: */
#endif
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */
0x0, /* External 6: */
0x0, /* External 7: */
0x0, /* External 8: */
0x0, /* External 9: */
0x0, /* External 10: */
#ifdef CONFIG_PCI
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* Ext 11: PCI2 slot 0 */
#else
0x0, /* External 11: */
#endif
};
#ifdef CONFIG_PCI
/*
* interrupt routing
*/
int
mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
if (!hose->index)
{
/* Handle PCI1 interrupts */
char pci_irq_table[][4] =
/*
* PCI IDSEL/INTPIN->INTLINE
* A B C D
*/
/* Note IRQ assignment for slots is based on which slot the elysium is
* in -- in this setup the elysium is in slot #2 (thus PIRQA is the first
* interrupt on the slot) */
{
{ 0, 1, 2, 3 }, /* 16 - PMC */
{ 0, 1, 2, 3 }, /* 17 P2P (Tsi320) */
{ 0, 1, 2, 3 }, /* 18 - Slot 1 */
{ 1, 2, 3, 0 }, /* 19 - Slot 2 */
{ 2, 3, 0, 1 }, /* 20 - Slot 3 */
{ 3, 0, 1, 2 }, /* 21 - Slot 4 */
};
const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4;
int i, j;
for (i = 0; i < 6; i++)
for (j = 0; j < 4; j++)
pci_irq_table[i][j] =
((pci_irq_table[i][j] + 5 -
cds_pci_slot) & 0x3) + PIRQ0A;
return PCI_IRQ_TABLE_LOOKUP;
} else {
/* Handle PCI2 interrupts (if we have one) */
char pci_irq_table[][4] =
{
/*
* We only have one slot and one interrupt
* going to PIRQA - PIRQD */
{ PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */
};
const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP;
}
}
#define ARCADIA_HOST_BRIDGE_IDSEL 17
#define ARCADIA_2ND_BRIDGE_IDSEL 3
@ -210,50 +124,104 @@ mpc85xx_cds_pcibios_fixup(void)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
pci_dev_put(dev);
}
/* Now map all the PCI irqs */
dev = NULL;
for_each_pci_dev(dev)
pci_read_irq_line(dev);
}
#ifdef CONFIG_PPC_I8259
#warning The i8259 PIC support is currently broken
static void mpc85xx_8259_cascade(unsigned int irq, struct
irq_desc *desc, struct pt_regs *regs)
{
unsigned int cascade_irq = i8259_irq(regs);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq, regs);
desc->chip->eoi(irq);
}
#endif /* PPC_I8259 */
#endif /* CONFIG_PCI */
void __init mpc85xx_cds_pic_init(void)
{
struct mpic *mpic1;
phys_addr_t OpenPIC_PAddr;
struct mpic *mpic;
struct resource r;
struct device_node *np = NULL;
struct device_node *cascade_node = NULL;
int cascade_irq;
/* Determine the Physical Address of the OpenPIC regs */
OpenPIC_PAddr = get_immrbase() + MPC85xx_OPENPIC_OFFSET;
np = of_find_node_by_type(np, "open-pic");
mpic1 = mpic_alloc(OpenPIC_PAddr,
if (np == NULL) {
printk(KERN_ERR "Could not find open-pic node\n");
return;
}
if (of_address_to_resource(np, 0, &r)) {
printk(KERN_ERR "Failed to map mpic register space\n");
of_node_put(np);
return;
}
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
4, MPC85xx_OPENPIC_IRQ_OFFSET, 0, 250,
mpc85xx_cds_openpic_initsenses,
sizeof(mpc85xx_cds_openpic_initsenses), " OpenPIC ");
BUG_ON(mpic1 == NULL);
mpic_assign_isu(mpic1, 0, OpenPIC_PAddr + 0x10200);
mpic_assign_isu(mpic1, 1, OpenPIC_PAddr + 0x10280);
mpic_assign_isu(mpic1, 2, OpenPIC_PAddr + 0x10300);
mpic_assign_isu(mpic1, 3, OpenPIC_PAddr + 0x10380);
mpic_assign_isu(mpic1, 4, OpenPIC_PAddr + 0x10400);
mpic_assign_isu(mpic1, 5, OpenPIC_PAddr + 0x10480);
mpic_assign_isu(mpic1, 6, OpenPIC_PAddr + 0x10500);
mpic_assign_isu(mpic1, 7, OpenPIC_PAddr + 0x10580);
4, 0, " OpenPIC ");
BUG_ON(mpic == NULL);
/* dummy mappings to get to 48 */
mpic_assign_isu(mpic1, 8, OpenPIC_PAddr + 0x10600);
mpic_assign_isu(mpic1, 9, OpenPIC_PAddr + 0x10680);
mpic_assign_isu(mpic1, 10, OpenPIC_PAddr + 0x10700);
mpic_assign_isu(mpic1, 11, OpenPIC_PAddr + 0x10780);
/* Return the mpic node */
of_node_put(np);
/* External ints */
mpic_assign_isu(mpic1, 12, OpenPIC_PAddr + 0x10000);
mpic_assign_isu(mpic1, 13, OpenPIC_PAddr + 0x10080);
mpic_assign_isu(mpic1, 14, OpenPIC_PAddr + 0x10100);
mpic_assign_isu(mpic, 0, r.start + 0x10200);
mpic_assign_isu(mpic, 1, r.start + 0x10280);
mpic_assign_isu(mpic, 2, r.start + 0x10300);
mpic_assign_isu(mpic, 3, r.start + 0x10380);
mpic_assign_isu(mpic, 4, r.start + 0x10400);
mpic_assign_isu(mpic, 5, r.start + 0x10480);
mpic_assign_isu(mpic, 6, r.start + 0x10500);
mpic_assign_isu(mpic, 7, r.start + 0x10580);
mpic_init(mpic1);
/* Used only for 8548 so far, but no harm in
* allocating them for everyone */
mpic_assign_isu(mpic, 8, r.start + 0x10600);
mpic_assign_isu(mpic, 9, r.start + 0x10680);
mpic_assign_isu(mpic, 10, r.start + 0x10700);
mpic_assign_isu(mpic, 11, r.start + 0x10780);
#ifdef CONFIG_PCI
mpic_setup_cascade(PIRQ0A, i8259_irq_cascade, NULL);
/* External Interrupts */
mpic_assign_isu(mpic, 12, r.start + 0x10000);
mpic_assign_isu(mpic, 13, r.start + 0x10080);
mpic_assign_isu(mpic, 14, r.start + 0x10100);
i8259_init(0,0);
#endif
mpic_init(mpic);
#ifdef CONFIG_PPC_I8259
/* Initialize the i8259 controller */
for_each_node_by_type(np, "interrupt-controller")
if (device_is_compatible(np, "chrp,iic")) {
cascade_node = np;
break;
}
if (cascade_node == NULL) {
printk(KERN_DEBUG "Could not find i8259 PIC\n");
return;
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
if (cascade_irq == NO_IRQ) {
printk(KERN_ERR "Failed to map cascade interrupt\n");
return;
}
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade);
#endif /* CONFIG_PPC_I8259 */
}
@ -298,8 +266,6 @@ mpc85xx_cds_setup_arch(void)
add_bridge(np);
ppc_md.pcibios_fixup = mpc85xx_cds_pcibios_fixup;
ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_map_irq = mpc85xx_map_irq;
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif


@ -16,38 +16,6 @@
#include <linux/init.h>
/* PCI interrupt controller */
#define PIRQA 3
#define PIRQB 4
#define PIRQC 5
#define PIRQD 6
#define PIRQ7 7
#define PIRQE 9
#define PIRQF 10
#define PIRQG 11
#define PIRQH 12
/* PCI-Express memory map */
#define MPC86XX_PCIE_LOWER_IO 0x00000000
#define MPC86XX_PCIE_UPPER_IO 0x00ffffff
#define MPC86XX_PCIE_LOWER_MEM 0x80000000
#define MPC86XX_PCIE_UPPER_MEM 0x9fffffff
#define MPC86XX_PCIE_IO_BASE 0xe2000000
#define MPC86XX_PCIE_MEM_OFFSET 0x00000000
#define MPC86XX_PCIE_IO_SIZE 0x01000000
#define PCIE1_CFG_ADDR_OFFSET (0x8000)
#define PCIE1_CFG_DATA_OFFSET (0x8004)
#define PCIE2_CFG_ADDR_OFFSET (0x9000)
#define PCIE2_CFG_DATA_OFFSET (0x9004)
#define MPC86xx_PCIE_OFFSET PCIE1_CFG_ADDR_OFFSET
#define MPC86xx_PCIE_SIZE (0x1000)
#define MPC86XX_RSTCR_OFFSET (0xe00b0) /* Reset Control Register */
#endif /* __MPC8641_HPCN_H__ */


@ -37,6 +37,14 @@
#include "mpc86xx.h"
#include "mpc8641_hpcn.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
#ifndef CONFIG_PCI
unsigned long isa_io_base = 0;
unsigned long isa_mem_base = 0;
@ -44,205 +52,215 @@ unsigned long pci_dram_offset = 0;
#endif
/*
* Internal interrupts are all Level Sensitive, and Positive Polarity
*/
static u_char mpc86xx_hpcn_openpic_initsenses[] __initdata = {
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: Reserved */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: MCM */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCIE1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: PCIE2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: Reserved */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: Reserved */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: DUART2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 1 Transmit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 1 Receive */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: TSEC 3 transmit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: TSEC 3 receive */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: TSEC 3 error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 1 Receive/Transmit Error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 2 Transmit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 2 Receive */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: TSEC 4 transmit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: TSEC 4 receive */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: TSEC 4 error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 2 Receive/Transmit Error */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 32: SRIO error/write-port unit */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 33: SRIO outbound doorbell */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 34: SRIO inbound doorbell */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 35: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 36: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 37: SRIO outbound message unit 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 38: SRIO inbound message unit 1 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 39: SRIO outbound message unit 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 40: SRIO inbound message unit 2 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 41: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 42: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 43: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 44: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 45: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 46: Unused */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 47: Unused */
0x0, /* External 0: */
0x0, /* External 1: */
0x0, /* External 2: */
0x0, /* External 3: */
0x0, /* External 4: */
0x0, /* External 5: */
0x0, /* External 6: */
0x0, /* External 7: */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 8: Pixis FPGA */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 9: ULI 8259 INTR Cascade */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 10: Quad ETH PHY */
0x0, /* External 11: */
0x0,
0x0,
0x0,
0x0,
};
static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
unsigned int cascade_irq = i8259_irq(regs);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq, regs);
desc->chip->eoi(irq);
}
void __init
mpc86xx_hpcn_init_irq(void)
{
struct mpic *mpic1;
struct device_node *np, *cascade_node = NULL;
int cascade_irq;
phys_addr_t openpic_paddr;
np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL)
return;
/* Determine the Physical Address of the OpenPIC regs */
openpic_paddr = get_immrbase() + MPC86xx_OPENPIC_OFFSET;
/* Alloc mpic structure and per isu has 16 INT entries. */
mpic1 = mpic_alloc(openpic_paddr,
mpic1 = mpic_alloc(np, openpic_paddr,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
16, MPC86xx_OPENPIC_IRQ_OFFSET, 0, 250,
mpc86xx_hpcn_openpic_initsenses,
sizeof(mpc86xx_hpcn_openpic_initsenses),
16, NR_IRQS - 4,
" MPIC ");
BUG_ON(mpic1 == NULL);
/* 48 Internal Interrupts */
mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10200);
mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10400);
mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10600);
mpic_assign_isu(mpic1, 0, openpic_paddr + 0x10000);
/* 16 External interrupts */
mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10000);
/* 48 Internal Interrupts */
mpic_assign_isu(mpic1, 1, openpic_paddr + 0x10200);
mpic_assign_isu(mpic1, 2, openpic_paddr + 0x10400);
mpic_assign_isu(mpic1, 3, openpic_paddr + 0x10600);
/* 16 External interrupts
* Moving them from [0 - 15] to [64 - 79]
*/
mpic_assign_isu(mpic1, 4, openpic_paddr + 0x10000);
mpic_init(mpic1);
#ifdef CONFIG_PCI
mpic_setup_cascade(MPC86xx_IRQ_EXT9, i8259_irq_cascade, NULL);
i8259_init(0, I8259_OFFSET);
/* Initialize i8259 controller */
for_each_node_by_type(np, "interrupt-controller")
if (device_is_compatible(np, "chrp,iic")) {
cascade_node = np;
break;
}
if (cascade_node == NULL) {
printk(KERN_DEBUG "mpc86xxhpcn: no ISA interrupt controller\n");
return;
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
if (cascade_irq == NO_IRQ) {
printk(KERN_ERR "mpc86xxhpcn: failed to map cascade interrupt");
return;
}
DBG("mpc86xxhpcn: cascade mapped to irq %d\n", cascade_irq);
i8259_init(cascade_node, 0);
set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade);
#endif
}
#ifdef CONFIG_PCI
/*
* interrupt routing
*/
int
mpc86xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
enum pirq{PIRQA = 8, PIRQB, PIRQC, PIRQD, PIRQE, PIRQF, PIRQG, PIRQH};
const unsigned char uli1575_irq_route_table[16] = {
0, /* 0: Reserved */
0x8, /* 1: 0b1000 */
0, /* 2: Reserved */
0x2, /* 3: 0b0010 */
0x4, /* 4: 0b0100 */
0x5, /* 5: 0b0101 */
0x7, /* 6: 0b0111 */
0x6, /* 7: 0b0110 */
0, /* 8: Reserved */
0x1, /* 9: 0b0001 */
0x3, /* 10: 0b0011 */
0x9, /* 11: 0b1001 */
0xb, /* 12: 0b1011 */
0, /* 13: Reserved */
0xd, /* 14: 0b1101 */
0xf, /* 15: 0b1111 */
};
static int __devinit
get_pci_irq_from_of(struct pci_controller *hose, int slot, int pin)
{
static char pci_irq_table[][4] = {
/*
* PCI IDSEL/INTPIN->INTLINE
* A B C D
*/
{PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 17 -- PCI Slot 1 */
{PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 18 -- PCI Slot 2 */
{0, 0, 0, 0}, /* IDSEL 19 */
{0, 0, 0, 0}, /* IDSEL 20 */
{0, 0, 0, 0}, /* IDSEL 21 */
{0, 0, 0, 0}, /* IDSEL 22 */
{0, 0, 0, 0}, /* IDSEL 23 */
{0, 0, 0, 0}, /* IDSEL 24 */
{0, 0, 0, 0}, /* IDSEL 25 */
{PIRQD, PIRQA, PIRQB, PIRQC}, /* IDSEL 26 -- PCI Bridge*/
{PIRQC, 0, 0, 0}, /* IDSEL 27 -- LAN */
{PIRQE, PIRQF, PIRQH, PIRQ7}, /* IDSEL 28 -- USB 1.1 */
{PIRQE, PIRQF, PIRQG, 0}, /* IDSEL 29 -- Audio & Modem */
{PIRQH, 0, 0, 0}, /* IDSEL 30 -- LPC & PMU*/
{PIRQD, 0, 0, 0}, /* IDSEL 31 -- ATA */
};
struct of_irq oirq;
u32 laddr[3];
struct device_node *hosenode = hose ? hose->arch_data : NULL;
const long min_idsel = 17, max_idsel = 31, irqs_per_slot = 4;
return PCI_IRQ_TABLE_LOOKUP + I8259_OFFSET;
if (!hosenode) return -EINVAL;
laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(slot, 0) << 8);
laddr[1] = laddr[2] = 0;
of_irq_map_raw(hosenode, &pin, laddr, &oirq);
DBG("mpc86xx_hpcn: pci irq addr %x, slot %d, pin %d, irq %d\n",
laddr[0], slot, pin, oirq.specifier[0]);
return oirq.specifier[0];
}
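The lookup above encodes the PCI unit address for of_irq_map_raw() as (first_busno << 16) | (devfn << 8) and returns the first cell of the interrupt specifier. A minimal usage sketch follows; the slot/pin values and the example_ helper name are hypothetical, not part of the commit.

/* Illustrative only: querying the routing for IDSEL 17, pin A (INTA). */
static void __devinit example_slot17_irq(struct pci_controller *hose)
{
	int irq = get_pci_irq_from_of(hose, 17, 1);

	if (irq > 0 && irq < 16)
		printk(KERN_DEBUG "slot 17 INTA maps to ISA IRQ %d\n", irq);
}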
static void __devinit quirk_ali1575(struct pci_dev *dev)
static void __devinit quirk_uli1575(struct pci_dev *dev)
{
unsigned short temp;
struct pci_controller *hose = pci_bus_to_host(dev->bus);
unsigned char irq2pin[16];
unsigned long pirq_map_word = 0;
u32 irq;
int i;
/*
* ALI1575 interrupts route table setup:
*
* IRQ pin IRQ#
* PIRQA ---- 3
* PIRQB ---- 4
* PIRQC ---- 5
* PIRQD ---- 6
* PIRQE ---- 9
* PIRQF ---- 10
* PIRQG ---- 11
* PIRQH ---- 12
* ULI1575 interrupts route setup
*/
memset(irq2pin, 0, 16); /* Initialize default value 0 */
/*
* PIRQA -> PIRQD mapping read from OF-tree
*
* interrupts for PCI slot0 -- PIRQA / PIRQB / PIRQC / PIRQD
* PCI slot1 -- PIRQB / PIRQC / PIRQD / PIRQA
*/
pci_write_config_dword(dev, 0x48, 0xb9317542);
for (i = 0; i < 4; i++){
irq = get_pci_irq_from_of(hose, 17, i + 1);
if (irq > 0 && irq < 16)
irq2pin[irq] = PIRQA + i;
else
printk(KERN_WARNING "ULI1575 device "
"(slot %d, pin %d) irq %d is invalid.\n",
17, i, irq);
}
/* USB 1.1 OHCI controller 1, interrupt: PIRQE */
pci_write_config_byte(dev, 0x86, 0x0c);
/*
* PIRQE -> PIRQF mapping set manually
*
* IRQ pin IRQ#
* PIRQE ---- 9
* PIRQF ---- 10
* PIRQG ---- 11
* PIRQH ---- 12
*/
for (i = 0; i < 4; i++) irq2pin[i + 9] = PIRQE + i;
/* USB 1.1 OHCI controller 2, interrupt: PIRQF */
pci_write_config_byte(dev, 0x87, 0x0d);
/* Set IRQ-PIRQ Mapping to ULI1575 */
for (i = 0; i < 16; i++)
if (irq2pin[i])
pirq_map_word |= (uli1575_irq_route_table[i] & 0xf)
<< ((irq2pin[i] - PIRQA) * 4);
/* USB 1.1 OHCI controller 3, interrupt: PIRQH */
pci_write_config_byte(dev, 0x88, 0x0f);
/* ULI1575 IRQ mapping conf register default value is 0xb9317542 */
DBG("Setup ULI1575 IRQ mapping configuration register value = 0x%x\n",
pirq_map_word);
pci_write_config_dword(dev, 0x48, pirq_map_word);
/* USB 2.0 controller, interrupt: PIRQ7 */
pci_write_config_byte(dev, 0x74, 0x06);
#define ULI1575_SET_DEV_IRQ(slot, pin, reg) \
do { \
int irq; \
irq = get_pci_irq_from_of(hose, slot, pin); \
if (irq > 0 && irq < 16) \
pci_write_config_byte(dev, reg, irq2pin[irq]); \
else \
printk(KERN_WARNING "ULI1575 device " \
"(slot %d, pin %d) irq %d is invalid.\n", \
slot, pin, irq); \
} while(0)
/* Audio controller, interrupt: PIRQE */
pci_write_config_byte(dev, 0x8a, 0x0c);
/* USB 1.1 OHCI controller 1, slot 28, pin 1 */
ULI1575_SET_DEV_IRQ(28, 1, 0x86);
/* Modem controller, interrupt: PIRQF */
pci_write_config_byte(dev, 0x8b, 0x0d);
/* USB 1.1 OHCI controller 2, slot 28, pin 2 */
ULI1575_SET_DEV_IRQ(28, 2, 0x87);
/* HD audio controller, interrupt: PIRQG */
pci_write_config_byte(dev, 0x8c, 0x0e);
/* USB 1.1 OHCI controller 3, slot 28, pin 3 */
ULI1575_SET_DEV_IRQ(28, 3, 0x88);
/* Serial ATA interrupt: PIRQD */
pci_write_config_byte(dev, 0x8d, 0x0b);
/* USB 2.0 controller, slot 28, pin 4 */
irq = get_pci_irq_from_of(hose, 28, 4);
if (irq >= 0 && irq <=15)
pci_write_config_dword(dev, 0x74, uli1575_irq_route_table[irq]);
/* SMB interrupt: PIRQH */
pci_write_config_byte(dev, 0x8e, 0x0f);
/* Audio controller, slot 29, pin 1 */
ULI1575_SET_DEV_IRQ(29, 1, 0x8a);
/* PMU ACPI SCI interrupt: PIRQH */
pci_write_config_byte(dev, 0x8f, 0x0f);
/* Modem controller, slot 29, pin 2 */
ULI1575_SET_DEV_IRQ(29, 2, 0x8b);
/* HD audio controller, slot 29, pin 3 */
ULI1575_SET_DEV_IRQ(29, 3, 0x8c);
/* SMB interrupt: slot 30, pin 1 */
ULI1575_SET_DEV_IRQ(30, 1, 0x8e);
/* PMU ACPI SCI interrupt: slot 30, pin 2 */
ULI1575_SET_DEV_IRQ(30, 2, 0x8f);
/* Serial ATA interrupt: slot 31, pin 1 */
ULI1575_SET_DEV_IRQ(31, 1, 0x8d);
/* Primary PATA IDE IRQ: 14
* Secondary PATA IDE IRQ: 15
*/
pci_write_config_byte(dev, 0x44, 0x3d);
pci_write_config_byte(dev, 0x75, 0x0f);
pci_write_config_byte(dev, 0x44, 0x30 | uli1575_irq_route_table[14]);
pci_write_config_byte(dev, 0x75, uli1575_irq_route_table[15]);
/* Set IRQ14 and IRQ15 to legacy IRQs */
pci_read_config_word(dev, 0x46, &temp);
@ -264,6 +282,8 @@ static void __devinit quirk_ali1575(struct pci_dev *dev)
*/
outb(0xfa, 0x4d0);
outb(0x1e, 0x4d1);
#undef ULI1575_SET_DEV_IRQ
}
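For reference, the mapping loop above packs one 4-bit ULI1575 route code per PIRQ line into the config dword at offset 0x48, with PIRQA occupying the lowest nibble. Below is a hedged, illustrative sketch using hypothetical IRQ assignments that are not taken from the commit.

/* Illustrative only: if the OF lookup had put ISA IRQ 9 on PIRQA and
 * ISA IRQ 10 on PIRQB, the register value would come out as 0x31:
 * route_table[9] = 0x1 in nibble 0, route_table[10] = 0x3 in nibble 1. */
static u32 example_pirq_map_word(void)
{
	u8 irq2pin[16] = { [9] = PIRQA, [10] = PIRQB };
	u32 word = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (irq2pin[i])
			word |= (uli1575_irq_route_table[i] & 0xf)
				<< ((irq2pin[i] - PIRQA) * 4);

	return word;	/* 0x00000031 for this example */
}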
static void __devinit quirk_uli5288(struct pci_dev *dev)
@ -306,7 +326,7 @@ static void __devinit early_uli5249(struct pci_dev *dev)
dev->class |= 0x1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_ali1575);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_uli1575);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
@ -337,8 +357,6 @@ mpc86xx_hpcn_setup_arch(void)
for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
add_bridge(np);
ppc_md.pci_swizzle = common_swizzle;
ppc_md.pci_map_irq = mpc86xx_map_irq;
ppc_md.pci_exclude_device = mpc86xx_exclude_device;
#endif
@ -377,6 +395,15 @@ mpc86xx_hpcn_show_cpuinfo(struct seq_file *m)
}
void __init mpc86xx_hpcn_pcibios_fixup(void)
{
struct pci_dev *dev = NULL;
for_each_pci_dev(dev)
pci_read_irq_line(dev);
}
/*
* Called very early, device-tree isn't unflattened
*/
@ -431,6 +458,7 @@ define_machine(mpc86xx_hpcn) {
.setup_arch = mpc86xx_hpcn_setup_arch,
.init_IRQ = mpc86xx_hpcn_init_irq,
.show_cpuinfo = mpc86xx_hpcn_show_cpuinfo,
.pcibios_fixup = mpc86xx_hpcn_pcibios_fixup,
.get_irq = mpic_get_irq,
.restart = mpc86xx_restart,
.time_init = mpc86xx_time_init,

View file

@ -1,7 +1,7 @@
/*
* mpc7448_hpc2.c
*
* Board setup routines for the Freescale Taiga platform
* Board setup routines for the Freescale mpc7448hpc2(taiga) platform
*
* Author: Jacob Pan
* jacob.pan@freescale.com
@ -12,10 +12,10 @@
*
* Copyright 2004-2006 Freescale Semiconductor, Inc.
*
* This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
@ -62,43 +62,8 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
extern int tsi108_setup_pci(struct device_node *dev);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
extern void tsi108_pci_int_init(void);
extern int tsi108_irq_cascade(struct pt_regs *regs, void *unused);
/*
* Define all of the IRQ senses and polarities. Taken from the
* mpc7448hpc manual.
* Note: Likely, this table and the following function should be
* obtained and derived from the OF Device Tree.
*/
static u_char mpc7448_hpc2_pic_initsenses[] __initdata = {
/* External on-board sources */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[0] XINT0 from FPGA */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[1] XINT1 from FPGA */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[2] PHY_INT from both GIGE */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* INT[3] RESERVED */
/* Internal Tsi108/109 interrupt sources */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA0 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA1 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA2 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* DMA3 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART0 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* UART1 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* I2C */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* GPIO */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE0 */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* GIGE1 */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* HLP */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* SDC */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Processor IF */
(IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE), /* Reserved IRQ */
(IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* PCI/X block */
};
extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs);
int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
{
@ -229,6 +194,8 @@ static void __init mpc7448_hpc2_init_IRQ(void)
{
struct mpic *mpic;
phys_addr_t mpic_paddr = 0;
unsigned int cascade_pci_irq;
struct device_node *tsi_pci;
struct device_node *tsi_pic;
tsi_pic = of_find_node_by_type(NULL, "open-pic");
@ -246,24 +213,31 @@ static void __init mpc7448_hpc2_init_IRQ(void)
DBG("%s: tsi108pic phys_addr = 0x%x\n", __FUNCTION__,
(u32) mpic_paddr);
mpic = mpic_alloc(mpic_paddr,
mpic = mpic_alloc(tsi_pic, mpic_paddr,
MPIC_PRIMARY | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
MPIC_SPV_EOI | MPIC_MOD_ID(MPIC_ID_TSI108),
0, /* num_sources used */
TSI108_IRQ_BASE,
0, /* num_sources used */
NR_IRQS - 4 /* XXXX */,
mpc7448_hpc2_pic_initsenses,
sizeof(mpc7448_hpc2_pic_initsenses), "Tsi108_PIC");
"Tsi108_PIC");
BUG_ON(mpic == NULL); /* XXXX */
mpic_init(mpic);
mpic_setup_cascade(IRQ_TSI108_PCI, tsi108_irq_cascade, mpic);
tsi_pci = of_find_node_by_type(NULL, "pci");
if (tsi_pci == 0) {
printk("%s: No tsi108 pci node found!\n", __FUNCTION__);
return;
}
cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
set_irq_data(cascade_pci_irq, mpic);
set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
tsi108_pci_int_init();
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
of_node_put(tsi_pic);
}
void mpc7448_hpc2_show_cpuinfo(struct seq_file *m)
@ -320,6 +294,7 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
return 0;
}
define_machine(mpc7448_hpc2){
.name = "MPC7448 HPC2",
.probe = mpc7448_hpc2_probe,

View file

@ -411,8 +411,15 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
DBG("End of boot params: %x\n", mem_end);
rsvmap[0] = mem_start;
rsvmap[1] = mem_end;
rsvmap[2] = 0;
rsvmap[3] = 0;
if (bootx_info->ramDisk) {
rsvmap[2] = ((unsigned long)bootx_info) + bootx_info->ramDisk;
rsvmap[3] = rsvmap[2] + bootx_info->ramDiskSize;
rsvmap[4] = 0;
rsvmap[5] = 0;
} else {
rsvmap[2] = 0;
rsvmap[3] = 0;
}
return (unsigned long)hdr;
}
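As a reading aid: the hunk above extends the boot-parameter reserve map so a BootX-supplied ramdisk is also protected. The entries are written as start/end pairs and the list is closed by a zero pair. A minimal sketch of the same logic, with placeholder parameter names invented for illustration:

/* Illustrative only: the shape of the reserve map the code above builds,
 * with and without a ramdisk (addresses are placeholders). */
static void example_fill_rsvmap(unsigned long *rsvmap, unsigned long mem_start,
				unsigned long mem_end, unsigned long rd_start,
				unsigned long rd_size)
{
	rsvmap[0] = mem_start;
	rsvmap[1] = mem_end;
	if (rd_start) {
		rsvmap[2] = rd_start;
		rsvmap[3] = rd_start + rd_size;
		rsvmap[4] = 0;
		rsvmap[5] = 0;		/* zero pair terminates the list */
	} else {
		rsvmap[2] = 0;		/* no ramdisk: terminate here */
		rsvmap[3] = 0;
	}
}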
@ -543,12 +550,12 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
*/
if (bi->version < 5) {
space = bi->deviceTreeOffset + bi->deviceTreeSize;
if (bi->ramDisk)
if (bi->ramDisk >= space)
space = bi->ramDisk + bi->ramDiskSize;
} else
space = bi->totalParamsSize;
bootx_printf("Total space used by parameters & ramdisk: %x \n", space);
bootx_printf("Total space used by parameters & ramdisk: 0x%x \n", space);
/* New BootX will have flushed all TLBs and enters kernel with
* MMU switched OFF, so this should not be useful anymore.

View file

@ -85,11 +85,8 @@ static int __init gfar_mdio_of_init(void)
mdio_data.irq[k] = -1;
while ((child = of_get_next_child(np, child)) != NULL) {
if (child->n_intrs) {
u32 *id =
(u32 *) get_property(child, "reg", NULL);
mdio_data.irq[*id] = child->intrs[0].line;
}
u32 *id = get_property(child, "reg", NULL);
mdio_data.irq[*id] = irq_of_parse_and_map(child, 0);
}
ret =
@ -131,6 +128,7 @@ static int __init gfar_of_init(void)
char *model;
void *mac_addr;
phandle *ph;
int n_res = 1;
memset(r, 0, sizeof(r));
memset(&gfar_data, 0, sizeof(gfar_data));
@ -139,8 +137,7 @@ static int __init gfar_of_init(void)
if (ret)
goto err;
r[1].start = np->intrs[0].line;
r[1].end = np->intrs[0].line;
r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
model = get_property(np, "model", NULL);
@ -150,19 +147,19 @@ static int __init gfar_of_init(void)
r[1].name = gfar_tx_intr;
r[2].name = gfar_rx_intr;
r[2].start = np->intrs[1].line;
r[2].end = np->intrs[1].line;
r[2].start = r[2].end = irq_of_parse_and_map(np, 1);
r[2].flags = IORESOURCE_IRQ;
r[3].name = gfar_err_intr;
r[3].start = np->intrs[2].line;
r[3].end = np->intrs[2].line;
r[3].start = r[3].end = irq_of_parse_and_map(np, 2);
r[3].flags = IORESOURCE_IRQ;
n_res += 2;
}
gfar_dev =
platform_device_register_simple("fsl-gianfar", i, &r[0],
np->n_intrs + 1);
n_res + 1);
if (IS_ERR(gfar_dev)) {
ret = PTR_ERR(gfar_dev);
@ -259,8 +256,7 @@ static int __init fsl_i2c_of_init(void)
if (ret)
goto err;
r[1].start = np->intrs[0].line;
r[1].end = np->intrs[0].line;
r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
@ -396,8 +392,7 @@ static int __init fsl_usb_of_init(void)
if (ret)
goto err;
r[1].start = np->intrs[0].line;
r[1].end = np->intrs[0].line;
r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
usb_dev_mph =
@ -445,8 +440,7 @@ static int __init fsl_usb_of_init(void)
if (ret)
goto unreg_mph;
r[1].start = np->intrs[0].line;
r[1].end = np->intrs[0].line;
r[1].start = r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
usb_dev_dr =

View file

@ -93,13 +93,15 @@ static int __init tsi108_eth_of_init(void)
goto err;
r[1].name = "tx";
r[1].start = np->intrs[0].line;
r[1].end = np->intrs[0].line;
r[1].start = irq_of_parse_and_map(np, 0);
r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
__FUNCTION__,r[1].name, r[1].start, r[1].end);
tsi_eth_dev =
platform_device_register_simple("tsi-ethernet", i, &r[0],
np->n_intrs + 1);
1);
if (IS_ERR(tsi_eth_dev)) {
ret = PTR_ERR(tsi_eth_dev);
@ -127,7 +129,7 @@ static int __init tsi108_eth_of_init(void)
tsi_eth_data.regs = r[0].start;
tsi_eth_data.phyregs = res.start;
tsi_eth_data.phy = *phy_id;
tsi_eth_data.irq_num = np->intrs[0].line;
tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0);
of_node_put(phy);
ret =
platform_device_add_data(tsi_eth_dev, &tsi_eth_data,

View file

@ -26,7 +26,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
@ -228,7 +227,7 @@ int __init tsi108_setup_pci(struct device_node *dev)
(hose)->ops = &tsi108_direct_pci_ops;
printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08lx. "
printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
"Firmware bus number: %d->%d\n",
rsrc.start, hose->first_busno, hose->last_busno);
@ -278,7 +277,7 @@ static void init_pci_source(void)
mb();
}
static inline int get_pci_source(void)
static inline unsigned int get_pci_source(void)
{
u_int temp = 0;
int irq = -1;
@ -371,12 +370,12 @@ static void tsi108_pci_irq_end(u_int irq)
* Interrupt controller descriptor for cascaded PCI interrupt controller.
*/
struct hw_interrupt_type tsi108_pci_irq = {
static struct irq_chip tsi108_pci_irq = {
.typename = "tsi108_PCI_int",
.enable = tsi108_pci_irq_enable,
.disable = tsi108_pci_irq_disable,
.mask = tsi108_pci_irq_disable,
.ack = tsi108_pci_irq_ack,
.end = tsi108_pci_irq_end,
.unmask = tsi108_pci_irq_enable,
};
/*
@ -399,14 +398,18 @@ void __init tsi108_pci_int_init(void)
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
for (i = 0; i < NUM_PCI_IRQS; i++) {
irq_desc[i + IRQ_PCI_INTAD_BASE].handler = &tsi108_pci_irq;
irq_desc[i + IRQ_PCI_INTAD_BASE].chip = &tsi108_pci_irq;
irq_desc[i + IRQ_PCI_INTAD_BASE].status |= IRQ_LEVEL;
}
init_pci_source();
}
int tsi108_irq_cascade(struct pt_regs *regs, void *unused)
void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc,
struct pt_regs *regs)
{
return get_pci_source();
unsigned int cascade_irq = get_pci_source();
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq, regs);
desc->chip->eoi(irq);
}

View file

@ -348,9 +348,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.context = (unsigned long) NO_CONTEXT;
init_task.thread.kregs = &fake_swapper_regs;
smp_setup_cpu_possible_map();
paging_init();
smp_setup_cpu_possible_map();
}
static int __init set_preferred_console(void)

View file

@ -34,7 +34,6 @@
#include <asm/tlbflush.h>
#include <asm/cpudata.h>
volatile int smp_processors_ready = 0;
int smp_num_cpus = 1;
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;

View file

@ -42,7 +42,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
static volatile int smp_processors_ready = 0;
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern cpuinfo_sparc cpu_data[NR_CPUS];

View file

@ -39,7 +39,6 @@ extern ctxd_t *srmmu_ctx_table_phys;
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
@ -217,7 +216,6 @@ void __init smp4m_smp_done(void)
}
/* Ok, they are spinning and ready to go. */
smp_processors_ready = 1;
}
/* At each hardware IRQ, we get this called to forward IRQ reception

View file

@ -1561,7 +1561,7 @@ restart:
/* ->key must be copied to avoid race with cfq_exit_queue() */
k = __cic->key;
if (unlikely(!k)) {
cfq_drop_dead_cic(ioc, cic);
cfq_drop_dead_cic(ioc, __cic);
goto restart;
}

View file

@ -765,7 +765,8 @@ void elv_unregister(struct elevator_type *e)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
e->ops.trim(p->io_context);
if (p->io_context)
e->ops.trim(p->io_context);
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);

View file

@ -3628,6 +3628,8 @@ struct io_context *current_io_context(gfp_t gfp_flags)
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic_root.rb_node = NULL;
/* make sure set_task_ioprio() sees the settings above */
smp_wmb();
tsk->io_context = ret;
}

View file

@ -285,6 +285,8 @@ static int __init acpi_ac_init(void)
{
int result;
if (acpi_disabled)
return -ENODEV;
acpi_ac_dir = acpi_lock_ac_dir();
if (!acpi_ac_dir)

View file

@ -484,10 +484,8 @@ acpi_memory_register_notify_handler(acpi_handle handle,
status = is_memory_device(handle);
if (ACPI_FAILURE(status)){
ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
if (ACPI_FAILURE(status))
return AE_OK; /* continue */
}
status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
acpi_memory_device_notify, NULL);
@ -503,10 +501,8 @@ acpi_memory_deregister_notify_handler(acpi_handle handle,
status = is_memory_device(handle);
if (ACPI_FAILURE(status)){
ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
if (ACPI_FAILURE(status))
return AE_OK; /* continue */
}
status = acpi_remove_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,

View file

@ -757,6 +757,9 @@ static int __init acpi_battery_init(void)
{
int result;
if (acpi_disabled)
return -ENODEV;
acpi_battery_dir = acpi_lock_battery_dir();
if (!acpi_battery_dir)
return -ENODEV;

View file

@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/pm.h>
@ -68,7 +69,8 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
if (ACPI_FAILURE(status) || !*device) {
ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle));
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
handle));
return -ENODEV;
}
@ -192,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
/* Make sure this is a valid target state */
if (!device->flags.power_manageable) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable",
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
device->kobj.name));
return -ENODEV;
}
@ -738,7 +740,10 @@ static int __init acpi_init(void)
return -ENODEV;
}
firmware_register(&acpi_subsys);
result = firmware_register(&acpi_subsys);
if (result < 0)
printk(KERN_WARNING "%s: firmware_register error: %d\n",
__FUNCTION__, result);
result = acpi_bus_init();

View file

@ -91,6 +91,14 @@ enum {
HK_EVENT_ENTERRING_S5,
};
enum conf_entry_enum {
bus_handle = 0,
bus_method = 1,
action_handle = 2,
method = 3,
LAST_CONF_ENTRY
};
/* procdir we use */
static struct proc_dir_entry *hotkey_proc_dir;
static struct proc_dir_entry *hotkey_config;
@ -244,19 +252,15 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file)
static char *format_result(union acpi_object *object)
{
char *buf = NULL;
buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL);
if (buf)
memset(buf, 0, RESULT_STR_LEN);
else
goto do_fail;
char *buf;
buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL);
if (!buf)
return NULL;
/* Now, just support integer type */
if (object->type == ACPI_TYPE_INTEGER)
sprintf(buf, "%d\n", (u32) object->integer.value);
do_fail:
return (buf);
return buf;
}
static int hotkey_polling_seq_show(struct seq_file *seq, void *offset)
@ -486,98 +490,102 @@ static void free_hotkey_device(union acpi_hotkey *key)
static void free_hotkey_buffer(union acpi_hotkey *key)
{
/* key would never be null, action method could be */
kfree(key->event_hotkey.action_method);
}
static void free_poll_hotkey_buffer(union acpi_hotkey *key)
{
/* key would never be null, others could be*/
kfree(key->poll_hotkey.action_method);
kfree(key->poll_hotkey.poll_method);
kfree(key->poll_hotkey.poll_result);
}
static int
init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str,
char *method, int std_num, int external_num)
init_hotkey_device(union acpi_hotkey *key, char **config_entry,
int std_num, int external_num)
{
acpi_handle tmp_handle;
acpi_status status = AE_OK;
if (std_num < 0 || IS_POLL(std_num) || !key)
goto do_fail;
if (!bus_str || !action_str || !method)
if (!config_entry[bus_handle] || !config_entry[action_handle]
|| !config_entry[method])
goto do_fail;
key->link.hotkey_type = ACPI_HOTKEY_EVENT;
key->link.hotkey_standard_num = std_num;
key->event_hotkey.flag = 0;
key->event_hotkey.action_method = method;
key->event_hotkey.action_method = config_entry[method];
status =
acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle));
status = acpi_get_handle(NULL, config_entry[bus_handle],
&(key->event_hotkey.bus_handle));
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
key->event_hotkey.external_hotkey_num = external_num;
status =
acpi_get_handle(NULL, action_str,
status = acpi_get_handle(NULL, config_entry[action_handle],
&(key->event_hotkey.action_handle));
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
status = acpi_get_handle(key->event_hotkey.action_handle,
method, &tmp_handle);
config_entry[method], &tmp_handle);
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
return AE_OK;
do_fail:
do_fail_zero:
key->event_hotkey.action_method = NULL;
do_fail:
return -ENODEV;
}
static int
init_poll_hotkey_device(union acpi_hotkey *key,
char *poll_str,
char *poll_method,
char *action_str, char *action_method, int std_num)
init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry,
int std_num)
{
acpi_status status = AE_OK;
acpi_handle tmp_handle;
if (std_num < 0 || IS_EVENT(std_num) || !key)
goto do_fail;
if (!poll_str || !poll_method || !action_str || !action_method)
if (!config_entry[bus_handle] ||!config_entry[bus_method] ||
!config_entry[action_handle] || !config_entry[method])
goto do_fail;
key->link.hotkey_type = ACPI_HOTKEY_POLLING;
key->link.hotkey_standard_num = std_num;
key->poll_hotkey.flag = 0;
key->poll_hotkey.poll_method = poll_method;
key->poll_hotkey.action_method = action_method;
key->poll_hotkey.poll_method = config_entry[bus_method];
key->poll_hotkey.action_method = config_entry[method];
status =
acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle));
status = acpi_get_handle(NULL, config_entry[bus_handle],
&(key->poll_hotkey.poll_handle));
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
status = acpi_get_handle(key->poll_hotkey.poll_handle,
poll_method, &tmp_handle);
config_entry[bus_method], &tmp_handle);
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
status =
acpi_get_handle(NULL, action_str,
acpi_get_handle(NULL, config_entry[action_handle],
&(key->poll_hotkey.action_handle));
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
status = acpi_get_handle(key->poll_hotkey.action_handle,
action_method, &tmp_handle);
config_entry[method], &tmp_handle);
if (ACPI_FAILURE(status))
goto do_fail;
goto do_fail_zero;
key->poll_hotkey.poll_result =
(union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!key->poll_hotkey.poll_result)
goto do_fail;
goto do_fail_zero;
return AE_OK;
do_fail:
do_fail_zero:
key->poll_hotkey.poll_method = NULL;
key->poll_hotkey.action_method = NULL;
do_fail:
return -ENODEV;
}
@ -652,17 +660,18 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset)
}
static int
get_parms(char *config_record,
int *cmd,
char **bus_handle,
char **bus_method,
char **action_handle,
char **method, int *internal_event_num, int *external_event_num)
get_parms(char *config_record, int *cmd, char **config_entry,
int *internal_event_num, int *external_event_num)
{
/* the format of *config_record =
* "1:\d+:*" : "cmd:internal_event_num"
* "\d+:\w+:\w+:\w+:\w+:\d+:\d+" :
* "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"
*/
char *tmp, *tmp1, count;
int i;
sscanf(config_record, "%d", cmd);
if (*cmd == 1) {
if (sscanf(config_record, "%d:%d", cmd, internal_event_num) !=
2)
@ -674,59 +683,27 @@ get_parms(char *config_record,
if (!tmp)
goto do_fail;
tmp++;
tmp1 = strchr(tmp, ':');
if (!tmp1)
goto do_fail;
count = tmp1 - tmp;
*bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
if (!*bus_handle)
goto do_fail;
strncpy(*bus_handle, tmp, count);
*(*bus_handle + count) = 0;
tmp = tmp1;
tmp++;
tmp1 = strchr(tmp, ':');
if (!tmp1)
goto do_fail;
count = tmp1 - tmp;
*bus_method = (char *)kmalloc(count + 1, GFP_KERNEL);
if (!*bus_method)
goto do_fail;
strncpy(*bus_method, tmp, count);
*(*bus_method + count) = 0;
tmp = tmp1;
tmp++;
tmp1 = strchr(tmp, ':');
if (!tmp1)
goto do_fail;
count = tmp1 - tmp;
*action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
if (!*action_handle)
goto do_fail;
strncpy(*action_handle, tmp, count);
*(*action_handle + count) = 0;
tmp = tmp1;
tmp++;
tmp1 = strchr(tmp, ':');
if (!tmp1)
goto do_fail;
count = tmp1 - tmp;
*method = (char *)kmalloc(count + 1, GFP_KERNEL);
if (!*method)
goto do_fail;
strncpy(*method, tmp, count);
*(*method + count) = 0;
if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <=
0)
goto do_fail;
return 6;
do_fail:
for (i = 0; i < LAST_CONF_ENTRY; i++) {
tmp1 = strchr(tmp, ':');
if (!tmp1) {
goto do_fail;
}
count = tmp1 - tmp;
config_entry[i] = kzalloc(count + 1, GFP_KERNEL);
if (!config_entry[i])
goto handle_failure;
strncpy(config_entry[i], tmp, count);
tmp = tmp1 + 1;
}
if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0)
goto handle_failure;
if (!IS_OTHERS(*internal_event_num)) {
return 6;
}
handle_failure:
while (i-- > 0)
kfree(config_entry[i]);
do_fail:
return -1;
}
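To make the format comment above concrete, here is a sketch of feeding a hypothetical record through get_parms(). The ACPI handle paths and method names are invented examples, not values from the commit; the entries are kzalloc()ed, so a real caller frees them the way hotkey_write_config does.

/* Illustrative only: parsing one hypothetical long-form config_record. */
static void example_parse(void)
{
	char record[] = "0:\\_SB_.PCI0.LPCB.EC0:_Q16:\\_SB_.PCI0.LPCB.EC0:_Q16:3:0";
	char *entry[LAST_CONF_ENTRY];
	int cmd, internal, external;

	if (get_parms(record, &cmd, entry, &internal, &external) == 6) {
		/* on success: cmd == 0, entry[bus_handle] == "\_SB_.PCI0.LPCB.EC0",
		 * entry[bus_method] == "_Q16", entry[action_handle] is the second
		 * path, entry[method] == "_Q16", internal == 3, external == 0 */
	}
}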
@ -736,50 +713,34 @@ static ssize_t hotkey_write_config(struct file *file,
size_t count, loff_t * data)
{
char *config_record = NULL;
char *bus_handle = NULL;
char *bus_method = NULL;
char *action_handle = NULL;
char *method = NULL;
char *config_entry[LAST_CONF_ENTRY];
int cmd, internal_event_num, external_event_num;
int ret = 0;
union acpi_hotkey *key = NULL;
union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
config_record = (char *)kmalloc(count + 1, GFP_KERNEL);
if (!config_record)
if (!key)
return -ENOMEM;
config_record = kzalloc(count + 1, GFP_KERNEL);
if (!config_record) {
kfree(key);
return -ENOMEM;
}
if (copy_from_user(config_record, buffer, count)) {
kfree(config_record);
kfree(key);
printk(KERN_ERR PREFIX "Invalid data\n");
return -EINVAL;
}
config_record[count] = 0;
ret = get_parms(config_record,
&cmd,
&bus_handle,
&bus_method,
&action_handle,
&method, &internal_event_num, &external_event_num);
ret = get_parms(config_record, &cmd, config_entry,
&internal_event_num, &external_event_num);
kfree(config_record);
if (IS_OTHERS(internal_event_num))
goto do_fail;
if (ret != 6) {
do_fail:
kfree(bus_handle);
kfree(bus_method);
kfree(action_handle);
kfree(method);
printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret);
return -EINVAL;
}
key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
if (!key)
goto do_fail;
memset(key, 0, sizeof(union acpi_hotkey));
if (cmd == 1) {
union acpi_hotkey *tmp = NULL;
tmp = get_hotkey_by_event(&global_hotkey_list,
@ -791,34 +752,19 @@ static ssize_t hotkey_write_config(struct file *file,
goto cont_cmd;
}
if (IS_EVENT(internal_event_num)) {
kfree(bus_method);
ret = init_hotkey_device(key, bus_handle, action_handle, method,
internal_event_num,
external_event_num);
} else
ret = init_poll_hotkey_device(key, bus_handle, bus_method,
action_handle, method,
internal_event_num);
if (ret) {
kfree(bus_handle);
kfree(action_handle);
if (IS_EVENT(internal_event_num))
free_hotkey_buffer(key);
else
free_poll_hotkey_buffer(key);
kfree(key);
printk(KERN_ERR PREFIX "Invalid hotkey\n");
return -EINVAL;
if (init_hotkey_device(key, config_entry,
internal_event_num, external_event_num))
goto init_hotkey_fail;
} else {
if (init_poll_hotkey_device(key, config_entry,
internal_event_num))
goto init_poll_hotkey_fail;
}
cont_cmd:
kfree(bus_handle);
kfree(action_handle);
cont_cmd:
switch (cmd) {
case 0:
if (get_hotkey_by_event
(&global_hotkey_list, key->link.hotkey_standard_num))
if (get_hotkey_by_event(&global_hotkey_list,
key->link.hotkey_standard_num))
goto fail_out;
else
hotkey_add(key);
@ -827,6 +773,7 @@ static ssize_t hotkey_write_config(struct file *file,
hotkey_remove(key);
break;
case 2:
/* key is kfree()ed if matched*/
if (hotkey_update(key))
goto fail_out;
break;
@ -835,11 +782,22 @@ static ssize_t hotkey_write_config(struct file *file,
break;
}
return count;
fail_out:
if (IS_EVENT(internal_event_num))
free_hotkey_buffer(key);
else
free_poll_hotkey_buffer(key);
init_poll_hotkey_fail: /* failed init_poll_hotkey_device */
kfree(config_entry[bus_method]);
config_entry[bus_method] = NULL;
init_hotkey_fail: /* failed init_hotkey_device */
kfree(config_entry[method]);
fail_out:
kfree(config_entry[bus_handle]);
kfree(config_entry[action_handle]);
/* No double free: the error paths above set the relevant entries to NULL */
if (IS_EVENT(internal_event_num)) {
if (config_entry[bus_method])
kfree(config_entry[bus_method]);
free_hotkey_buffer(key); /* frees [method] */
} else
free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */
kfree(key);
printk(KERN_ERR PREFIX "invalid key\n");
return -EINVAL;
@ -923,10 +881,9 @@ static ssize_t hotkey_execute_aml_method(struct file *file,
union acpi_hotkey *key;
arg = (char *)kmalloc(count + 1, GFP_KERNEL);
arg = kzalloc(count + 1, GFP_KERNEL);
if (!arg)
return -ENOMEM;
arg[count] = 0;
if (copy_from_user(arg, buffer, count)) {
kfree(arg);

View file

@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device)
status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n"));
kfree(ec_hc->smbus);
kfree(ec_hc);
kfree(smbus);
return -EIO;
}

View file

@ -746,6 +746,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
/*
* This can be called during resume with interrupts off.
* Like boot-time, we should be single threaded and will
* always get the lock if we try -- timeout or not.
* If this doesn't succeed, then we will oops courtesy of
* might_sleep() in down().
*/
if (!down_trylock(sem))
return AE_OK;
switch (timeout) {
/*
* No Wait:

View file

@ -1714,6 +1714,9 @@ static int __init acpi_sbs_init(void)
{
int result = 0;
if (acpi_disabled)
return -ENODEV;
init_MUTEX(&sbs_sem);
if (capacity_mode != DEF_CAPACITY_UNIT

View file

@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
@ -113,6 +114,8 @@ static struct kset acpi_namespace_kset = {
static void acpi_device_register(struct acpi_device *device,
struct acpi_device *parent)
{
int err;
/*
* Linkage
* -------
@ -138,7 +141,10 @@ static void acpi_device_register(struct acpi_device *device,
device->kobj.parent = &parent->kobj;
device->kobj.ktype = &ktype_acpi_ns;
device->kobj.kset = &acpi_namespace_kset;
kobject_register(&device->kobj);
err = kobject_register(&device->kobj);
if (err < 0)
printk(KERN_WARNING "%s: kobject_register error: %d\n",
__FUNCTION__, err);
create_sysfs_device_files(device);
}
@ -1450,7 +1456,9 @@ static int __init acpi_scan_init(void)
if (acpi_disabled)
return 0;
kset_register(&acpi_namespace_kset);
result = kset_register(&acpi_namespace_kset);
if (result < 0)
printk(KERN_ERR PREFIX "kset_register error: %d\n", result);
result = bus_register(&acpi_bus_type);
if (result) {

View file

@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle,
if (!data)
return AE_BAD_PARAMETER;
element = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
element = kmalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
if (!element)
return AE_NO_MEMORY;

View file

@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
"Node %d Mapped: %8lu kB\n"
"Node %d AnonPages: %8lu kB\n"
"Node %d PageTables: %8lu kB\n"
"Node %d NFS Unstable: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d Slab: %8lu kB\n",
nid, K(i.totalram),

View file

@ -266,7 +266,7 @@ repeat:
goto out;
if (req->cmd != READ) {
printk("GSCD: bad cmd %lu\n", rq_data_dir(req));
printk("GSCD: bad cmd %u\n", rq_data_dir(req));
end_request(req, 0);
goto repeat;
}

View file

@ -142,6 +142,7 @@ typedef struct _moxa_board_conf {
static moxa_board_conf moxa_boards[MAX_BOARDS];
static void __iomem *moxaBaseAddr[MAX_BOARDS];
static int loadstat[MAX_BOARDS];
struct moxa_str {
int type;
@ -1688,6 +1689,8 @@ int MoxaDriverPoll(void)
if (moxaCard == 0)
return (-1);
for (card = 0; card < MAX_BOARDS; card++) {
if (loadstat[card] == 0)
continue;
if ((ports = moxa_boards[card].numPorts) == 0)
continue;
if (readb(moxaIntPend[card]) == 0xff) {
@ -2903,6 +2906,7 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
}
break;
}
loadstat[cardno] = 1;
return (0);
}
@ -2920,7 +2924,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
len1 = len >> 1;
ptr = (ushort *) moxaBuff;
for (i = 0; i < len1; i++)
usum += *(ptr + i);
usum += le16_to_cpu(*(ptr + i));
retry = 0;
do {
len1 = len >> 1;
@ -2992,7 +2996,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
wlen = len >> 1;
uptr = (ushort *) moxaBuff;
for (i = 0; i < wlen; i++)
usum += uptr[i];
usum += le16_to_cpu(uptr[i]);
retry = 0;
j = 0;
do {

File diff suppressed because it is too large

View file

@ -36,6 +36,18 @@
#define TERMIOS_WAIT 2
#define TERMIOS_TERMIO 4
/**
* tty_wait_until_sent - wait for I/O to finish
* @tty: tty we are waiting for
* @timeout: how long we will wait
*
* Wait for characters pending in a tty driver to hit the wire, or
* for a timeout to occur (e.g. due to flow control)
*
* Locking: none
*/
void tty_wait_until_sent(struct tty_struct * tty, long timeout)
{
DECLARE_WAITQUEUE(wait, current);
@ -94,6 +106,18 @@ static void unset_locked_termios(struct termios *termios,
old->c_cc[i] : termios->c_cc[i];
}
/**
* change_termios - update termios values
* @tty: tty to update
* @new_termios: desired new value
*
* Perform updates to the termios values set on this terminal. There
* is a bit of layering violation here with n_tty in terms of the
* internal knowledge of this function.
*
* Locking: termios_sem
*/
static void change_termios(struct tty_struct * tty, struct termios * new_termios)
{
int canon_change;
@ -155,6 +179,19 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
up(&tty->termios_sem);
}
/**
* set_termios - set termios values for a tty
* @tty: terminal device
* @arg: user data
* @opt: option information
*
* Helper function to prepare termios data and run necessary other
* functions before using change_termios to do the actual changes.
*
* Locking:
* Called functions take ldisc and termios_sem locks
*/
static int set_termios(struct tty_struct * tty, void __user *arg, int opt)
{
struct termios tmp_termios;
@ -284,6 +321,17 @@ static void set_sgflags(struct termios * termios, int flags)
}
}
/**
* set_sgttyb - set legacy terminal values
* @tty: tty structure
* @sgttyb: pointer to old style terminal structure
*
* Updates a terminal from the legacy BSD style terminal information
* structure.
*
* Locking: termios_sem
*/
static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb)
{
int retval;
@ -369,9 +417,16 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars)
}
#endif
/*
* Send a high priority character to the tty.
/**
* send_prio_char - send priority character
*
* Send a high priority character to the tty even if stopped
*
* Locking: none
*
* FIXME: overlapping calls with start/stop tty lose state of tty
*/
static void send_prio_char(struct tty_struct *tty, char ch)
{
int was_stopped = tty->stopped;

View file

@ -1011,6 +1011,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
return -EPERM;
vt_dont_switch = 0;
return 0;
case VT_GETHIFONTMASK:
return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg);
default:
return -ENOIOCTLCMD;
}

View file

@ -26,6 +26,7 @@
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@ -64,17 +65,17 @@
#define ABIT_UGURU_IN_SENSOR 0
#define ABIT_UGURU_TEMP_SENSOR 1
#define ABIT_UGURU_NC 2
/* Timeouts / Retries, if these turn out to need a lot of fiddling we could
convert them to params. */
/* 250 was determined by trial and error, 200 works most of the time, but not
always. I assume this is cpu-speed independent, since the ISA-bus and not
the CPU should be the bottleneck. Note that 250 sometimes is still not
enough (only reported on AN7 mb) this is handled by a higher layer. */
#define ABIT_UGURU_WAIT_TIMEOUT 250
/* In many cases we need to wait for the uGuru to reach a certain status, most
of the time it will reach this status within 30 - 90 ISA reads, and thus we
can best busy wait. This define gives the total amount of reads to try. */
#define ABIT_UGURU_WAIT_TIMEOUT 125
/* However sometimes older versions of the uGuru seem to be distracted and they
do not respond for a long time. To handle this we sleep before each of the
last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */
#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5
/* Normally all expected status in abituguru_ready, are reported after the
first read, but sometimes not and we need to poll, 5 polls was not enough
50 sofar is. */
#define ABIT_UGURU_READY_TIMEOUT 50
first read, but sometimes not and we need to poll. */
#define ABIT_UGURU_READY_TIMEOUT 5
/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */
#define ABIT_UGURU_MAX_RETRIES 3
#define ABIT_UGURU_RETRY_DELAY (HZ/5)
@ -226,6 +227,10 @@ static int abituguru_wait(struct abituguru_data *data, u8 state)
timeout--;
if (timeout == 0)
return -EBUSY;
/* sleep a bit before our last few tries; see the comment where
ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */
if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP)
msleep(0);
}
return 0;
}
@ -256,6 +261,7 @@ static int abituguru_ready(struct abituguru_data *data)
"CMD reg does not hold 0xAC after ready command\n");
return -EIO;
}
msleep(0);
}
/* After this the ABIT_UGURU_DATA port should contain
@ -268,6 +274,7 @@ static int abituguru_ready(struct abituguru_data *data)
"state != more input after ready command\n");
return -EIO;
}
msleep(0);
}
data->uguru_ready = 1;
@ -331,7 +338,8 @@ static int abituguru_read(struct abituguru_data *data,
/* And read the data */
for (i = 0; i < count; i++) {
if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for "
ABIT_UGURU_DEBUG(retries ? 1 : 3,
"timeout exceeded waiting for "
"read state (bank: %d, sensor: %d)\n",
(int)bank_addr, (int)sensor_addr);
break;
@ -350,7 +358,9 @@ static int abituguru_read(struct abituguru_data *data,
static int abituguru_write(struct abituguru_data *data,
u8 bank_addr, u8 sensor_addr, u8 *buf, int count)
{
int i;
/* We use the ready timeout as we have to wait for 0xAC just like the
ready function */
int i, timeout = ABIT_UGURU_READY_TIMEOUT;
/* Send the address */
i = abituguru_send_address(data, bank_addr, sensor_addr,
@ -370,7 +380,8 @@ static int abituguru_write(struct abituguru_data *data,
}
/* Now we need to wait till the chip is ready to be read again,
don't ask why */
so that we can read 0xAC as confirmation that our write has
succeeded. */
if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state "
"after write (bank: %d, sensor: %d)\n", (int)bank_addr,
@ -379,11 +390,15 @@ static int abituguru_write(struct abituguru_data *data,
}
/* Cmd port MUST be read now and should contain 0xAC */
if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write "
"(bank: %d, sensor: %d)\n", (int)bank_addr,
(int)sensor_addr);
return -EIO;
while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
timeout--;
if (timeout == 0) {
ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after "
"write (bank: %d, sensor: %d)\n",
(int)bank_addr, (int)sensor_addr);
return -EIO;
}
msleep(0);
}
/* Last put the chip back in ready state */
@ -403,7 +418,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
u8 sensor_addr)
{
u8 val, buf[3];
int ret = ABIT_UGURU_NC;
int i, ret = -ENODEV; /* error is the most commonly used retval :| */
/* If overriden by the user return the user selected type */
if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR &&
@ -439,7 +454,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
buf[2] = 250;
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
buf, 3) != 3)
return -ENODEV;
goto abituguru_detect_bank1_sensor_type_exit;
/* Now we need 20 ms to give the uguru time to read the sensors
and raise a voltage alarm */
set_current_state(TASK_UNINTERRUPTIBLE);
@ -447,21 +462,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/* Check for alarm and check the alarm is a volt low alarm. */
if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
return -ENODEV;
goto abituguru_detect_bank1_sensor_type_exit;
if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
sensor_addr, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
return -ENODEV;
goto abituguru_detect_bank1_sensor_type_exit;
if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
/* Restore original settings */
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
sensor_addr,
data->bank1_settings[sensor_addr],
3) != 3)
return -ENODEV;
ABIT_UGURU_DEBUG(2, " found volt sensor\n");
return ABIT_UGURU_IN_SENSOR;
ret = ABIT_UGURU_IN_SENSOR;
goto abituguru_detect_bank1_sensor_type_exit;
} else
ABIT_UGURU_DEBUG(2, " alarm raised during volt "
"sensor test, but volt low flag not set\n");
@ -477,7 +487,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
buf[2] = 10;
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
buf, 3) != 3)
return -ENODEV;
goto abituguru_detect_bank1_sensor_type_exit;
/* Now we need 50 ms to give the uguru time to read the sensors
and raise a temp alarm */
set_current_state(TASK_UNINTERRUPTIBLE);
@ -485,15 +495,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/* Check for alarm and check the alarm is a temp high alarm. */
if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
return -ENODEV;
goto abituguru_detect_bank1_sensor_type_exit;
if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
sensor_addr, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
return -ENODEV;
goto abituguru_detect_bank1_sensor_type_exit;
if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) {
ret = ABIT_UGURU_TEMP_SENSOR;
ABIT_UGURU_DEBUG(2, " found temp sensor\n");
ret = ABIT_UGURU_TEMP_SENSOR;
goto abituguru_detect_bank1_sensor_type_exit;
} else
ABIT_UGURU_DEBUG(2, " alarm raised during temp "
"sensor test, but temp high flag not set\n");
@ -501,11 +512,23 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor "
"test\n");
/* Restore original settings */
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
data->bank1_settings[sensor_addr], 3) != 3)
ret = ABIT_UGURU_NC;
abituguru_detect_bank1_sensor_type_exit:
/* Restore original settings. Failing here is really bad: it has been
reported that some BIOSes hang when entering the uGuru menu with
invalid settings present in the uGuru, so we try this 3 times. */
for (i = 0; i < 3; i++)
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
sensor_addr, data->bank1_settings[sensor_addr],
3) == 3)
break;
if (i == 3) {
printk(KERN_ERR ABIT_UGURU_NAME
": Fatal error: could not restore original settings. "
"This should never happen; please report this to the "
"abituguru maintainer (see MAINTAINERS)\n");
return -ENODEV;
}
return ret;
}
@ -1305,7 +1328,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev)
data->update_timeouts = 0;
LEAVE_UPDATE:
/* handle timeout condition */
if (err == -EBUSY) {
if (!success && (err == -EBUSY || err >= 0)) {
/* No overflow please */
if (data->update_timeouts < 255u)
data->update_timeouts++;

View file

@ -43,13 +43,12 @@
/*-------------------------------------------------------------------------*/
#define DRIVER_VERSION "2 May 2005"
#define DRIVER_NAME (tps65010_driver.name)
#define DRIVER_NAME (tps65010_driver.driver.name)
MODULE_DESCRIPTION("TPS6501x Power Management Driver");
MODULE_LICENSE("GPL");
static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END };
static unsigned short normal_i2c_range[] = { I2C_CLIENT_END };
I2C_CLIENT_INSMOD;
@ -100,7 +99,7 @@ struct tps65010 {
/* not currently tracking GPIO state */
};
#define POWER_POLL_DELAY msecs_to_jiffies(800)
#define POWER_POLL_DELAY msecs_to_jiffies(5000)
/*-------------------------------------------------------------------------*/
@ -520,8 +519,11 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
goto fail1;
}
/* the IRQ is active low, but many gpio lines can't support that
* so this driver can use falling-edge triggers instead.
*/
irqflags = IRQF_SAMPLE_RANDOM;
#ifdef CONFIG_ARM
irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW;
if (machine_is_omap_h2()) {
tps->model = TPS65010;
omap_cfg_reg(W4_GPIO58);
@ -543,8 +545,6 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
// FIXME set up this board's IRQ ...
}
#else
irqflags = IRQF_SAMPLE_RANDOM;
#endif
if (tps->irq > 0) {

View file

@ -3552,6 +3552,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *of_node;
@ -3563,8 +3565,6 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
}
#endif
pci_save_state(pdev);
return 0;
}

View file

@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_PORT_ACTIVE ||
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
event->event == IB_EVENT_SM_CHANGE) {
event->event == IB_EVENT_SM_CHANGE ||
event->event == IB_EVENT_CLIENT_REREGISTER) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
INIT_WORK(&work->work, ib_cache_task, work);

View file

@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
event->event == IB_EVENT_PORT_ACTIVE ||
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
event->event == IB_EVENT_SM_CHANGE) {
event->event == IB_EVENT_SM_CHANGE ||
event->event == IB_EVENT_CLIENT_REREGISTER) {
struct ib_sa_device *sa_dev;
sa_dev = container_of(handler, typeof(*sa_dev), event_handler);

View file

@ -967,12 +967,12 @@ static struct {
} mthca_hca_table[] = {
[TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
.flags = 0 },
[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400),
[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
.flags = MTHCA_FLAG_PCIE },
[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0),
[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
.flags = MTHCA_FLAG_MEMFREE |
MTHCA_FLAG_PCIE },
[SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800),
[SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
.flags = MTHCA_FLAG_MEMFREE |
MTHCA_FLAG_PCIE |
MTHCA_FLAG_SINAI_OPT }

View file

@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev)
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
dev->ib_dev.node_type = IB_NODE_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.dma_device = &dev->pdev->dev;
@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.modify_srq = mthca_modify_srq;
dev->ib_dev.query_srq = mthca_query_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
dev->ib_dev.uverbs_cmd_mask |=
(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
if (mthca_is_memfree(dev))
dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;

View file

@ -136,8 +136,8 @@ struct mthca_ah {
* We have one global lock that protects dev->cq/qp_table. Each
* struct mthca_cq/qp also has its own lock. An individual qp lock
* may be taken inside of an individual cq lock. Both cqs attached to
* a qp may be locked, with the send cq locked first. No other
* nesting should be done.
* a qp may be locked, with the cq with the lower cqn locked first.
* No other nesting should be done.
*
* Each struct mthca_cq/qp also has an ref count, protected by the
* corresponding table lock. The pointer from the cq/qp_table to the

View file

@ -99,6 +99,10 @@ enum {
MTHCA_QP_BIT_RSC = 1 << 3
};
enum {
MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};
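The flag defined above is consumed further down in both post_send paths: the first WQE of a doorbell batch contributes its IB_SEND_FENCE setting to the doorbell's f0 word. A minimal, illustrative restatement of that computation (the example_ helper name is invented):

/* Illustrative only: how f0 is chosen for the first WQE of a batch,
 * mirroring the hunks added to mthca_tavor_post_send()/mthca_arbel_post_send(). */
static inline u32 example_first_wqe_fence(const struct ib_send_wr *wr)
{
	return (wr->send_flags & IB_SEND_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0;
}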
struct mthca_qp_path {
__be32 port_pkey;
u8 rnr_retry;
@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
return 0;
}
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
if (send_cq == recv_cq)
spin_lock_irq(&send_cq->lock);
else if (send_cq->cqn < recv_cq->cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock_irq(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
if (send_cq == recv_cq)
spin_unlock_irq(&send_cq->lock);
else if (send_cq->cqn < recv_cq->cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock);
}
}
int mthca_alloc_sqp(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_cq *send_cq,
@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
* Lock CQs here, so that CQ polling code can do QP lookup
* without taking a lock.
*/
spin_lock_irq(&send_cq->lock);
if (send_cq != recv_cq)
spin_lock(&recv_cq->lock);
mthca_lock_cqs(send_cq, recv_cq);
spin_lock(&dev->qp_table.lock);
mthca_array_clear(&dev->qp_table.qp, mqpn);
spin_unlock(&dev->qp_table.lock);
if (send_cq != recv_cq)
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
mthca_unlock_cqs(send_cq, recv_cq);
err_out:
dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
* Lock CQs here, so that CQ polling code can do QP lookup
* without taking a lock.
*/
spin_lock_irq(&send_cq->lock);
if (send_cq != recv_cq)
spin_lock(&recv_cq->lock);
mthca_lock_cqs(send_cq, recv_cq);
spin_lock(&dev->qp_table.lock);
mthca_array_clear(&dev->qp_table.qp,
@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
--qp->refcount;
spin_unlock(&dev->qp_table.lock);
if (send_cq != recv_cq)
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
mthca_unlock_cqs(send_cq, recv_cq);
wait_event(qp->wait, !get_qp_refcount(dev, qp));
@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
int size;
int size0 = 0;
u32 f0 = 0;
u32 f0;
int ind;
u8 op0 = 0;
@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!size0) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
f0 = wr->send_flags & IB_SEND_FENCE ?
MTHCA_SEND_DOORBELL_FENCE : 0;
}
++ind;
@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
int size;
int size0 = 0;
u32 f0 = 0;
u32 f0;
int ind;
u8 op0 = 0;
@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!size0) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
f0 = wr->send_flags & IB_SEND_FENCE ?
MTHCA_SEND_DOORBELL_FENCE : 0;
}
++ind;

View file

@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
return iser_conn_set_full_featured_mode(conn);
}
static void
iscsi_iser_conn_terminate(struct iscsi_conn *conn)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iser_conn *ib_conn = iser_conn->ib_conn;
BUG_ON(!ib_conn);
/* starts conn teardown process, waits until all previously *
* posted buffers get flushed, deallocates all conn resources */
iser_conn_terminate(ib_conn);
iser_conn->ib_conn = NULL;
conn->recv_lock = NULL;
}
static struct iscsi_transport iscsi_iser_transport;
static struct iscsi_cls_session *
@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
static void
iscsi_iser_ep_disconnect(__u64 ep_handle)
{
struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
struct iser_conn *ib_conn;
ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
if (!ib_conn)
return;
iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
iser_conn_terminate(ib_conn);
}
@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = {
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_conn_stop,
/* these are called as part of conn recovery */
.suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */
.terminate_conn = iscsi_iser_conn_terminate,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_iser_conn_get_stats,

View file

@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd)
i++;
dev->rep[REP_PERIOD] = period[i];
while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY])
while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY])
j++;
dev->rep[REP_DELAY] = delay[j];

View file

@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi)
return 1;
}
static struct key_entry keymap_empty[] __initdata = {
static struct key_entry keymap_empty[] = {
{ KE_END, 0 }
};
static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
static struct key_entry keymap_fs_amilo_pro_v2000[] = {
{ KE_KEY, 0x01, KEY_HELP },
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
{ KE_END, 0 }
};
static struct key_entry keymap_fujitsu_n3510[] __initdata = {
static struct key_entry keymap_fujitsu_n3510[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_KEY, 0x36, KEY_WWW },
@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] __initdata = {
{ KE_END, 0 }
};
static struct key_entry keymap_wistron_ms2111[] __initdata = {
static struct key_entry keymap_wistron_ms2111[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_KEY, 0x13, KEY_PROG3 },
@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] __initdata = {
{ KE_END, 0 }
};
static struct key_entry keymap_wistron_ms2141[] __initdata = {
static struct key_entry keymap_wistron_ms2141[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_WIFI, 0x30, 0 },
@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] __initdata = {
{ KE_END, 0 }
};
static struct key_entry keymap_acer_aspire_1500[] __initdata = {
static struct key_entry keymap_acer_aspire_1500[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_WIFI, 0x30, 0 },
@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] __initdata = {
{ KE_END, 0 }
};
static struct key_entry keymap_acer_travelmate_240[] __initdata = {
static struct key_entry keymap_acer_travelmate_240[] = {
{ KE_KEY, 0x31, KEY_MAIL },
{ KE_KEY, 0x36, KEY_WWW },
{ KE_KEY, 0x11, KEY_PROG1 },
@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] __initdata = {
{ KE_END, 0 }
};
static struct key_entry keymap_aopen_1559as[] __initdata = {
static struct key_entry keymap_aopen_1559as[] = {
{ KE_KEY, 0x01, KEY_HELP },
{ KE_KEY, 0x06, KEY_PROG3 },
{ KE_KEY, 0x11, KEY_PROG1 },


@ -485,13 +485,6 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
param[0] = 40;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 200;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 200;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
param[0] = 60;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
if (set_properties) {
set_bit(BTN_MIDDLE, psmouse->dev->keybit);
set_bit(REL_WHEEL, psmouse->dev->relbit);


@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region)
struct region *reg, *nreg;
read_unlock(&rh->hash_lock);
nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
if (unlikely(!nreg))
nreg = kmalloc(sizeof(struct region), GFP_NOIO);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
RH_CLEAN : RH_NOSYNC;
nreg->rh = rh;


@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev)
repeat:
spin_lock_irq(&mddev->write_lock);
if (mddev->degraded && mddev->sb_dirty == 3)
/* If the array is degraded, then skipping spares is both
* dangerous and fairly pointless.
* Dangerous because a device that was removed from the array
* might have an event_count that still looks up-to-date,
* so it can be re-added without a resync.
* Pointless because if there are any spares to skip,
* then a recovery will happen and soon that array won't
* be degraded any more and the spare can go back to sleep then.
*/
mddev->sb_dirty = 1;
sync_req = mddev->in_sync;
mddev->utime = get_seconds();
if (mddev->sb_dirty == 3)


@ -1625,15 +1625,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return 0;
}
/* before building a request, check if we can skip these blocks..
* This call to bitmap_start_sync doesn't actually record anything
*/
if (mddev->bitmap == NULL &&
mddev->recovery_cp == MaxSector &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
return max_sector - sector_nr;
}
/* before building a request, check if we can skip these blocks..
* This call to bitmap_start_sync doesn't actually record anything
*/
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */


@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER
struct work_struct fc_setup_reset_work;
struct list_head fc_rports;
spinlock_t fc_rescan_work_lock;
int fc_rescan_work_count;
struct work_struct fc_rescan_work;
char fc_rescan_work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;


@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
* if still doing discovery,
* hang loose a while until finished
*/
if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) {
if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
(pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
(pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
== MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
if (count-- > 0) {
msleep(100);
goto try_again;
@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg)
{
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
int ii;
int work_to_do;
u64 pn;
unsigned long flags;
struct mptfc_rport_info *ri;
do {
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
}
/* start by tagging all ports as missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
}
}
/*
* now rescan devices known to adapter,
* will reregister existing rports
*/
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
(void) mptfc_GetFcPortPage0(ioc, ii);
mptfc_init_host_attr(ioc,ii); /* refresh */
mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
/*
* now rescan devices known to adapter,
* will reregister existing rports
*/
for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
(void) mptfc_GetFcPortPage0(ioc, ii);
mptfc_init_host_attr(ioc, ii); /* refresh */
mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
}
/* delete devices still missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
/* if newly missing, delete it */
if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_rescan.%d: %llx deleted\n",
ioc->name,
ioc->sh->host_no,
(unsigned long long)pn));
}
/* delete devices still missing */
list_for_each_entry(ri, &ioc->fc_rports, list) {
/* if newly missing, delete it */
if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
MPT_RPORT_INFO_FLAGS_MISSING);
fc_remote_port_delete(ri->rport); /* won't sleep */
ri->rport = NULL;
pn = (u64)ri->pg0.WWPN.High << 32 |
(u64)ri->pg0.WWPN.Low;
dfcprintk ((MYIOC_s_INFO_FMT
"mptfc_rescan.%d: %llx deleted\n",
ioc->name,
ioc->sh->host_no,
(unsigned long long)pn));
}
}
/*
* allow multiple passes as target state
* might have changed during scan
*/
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_count > 2) /* only need one more */
ioc->fc_rescan_work_count = 2;
work_to_do = --ioc->fc_rescan_work_count;
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
} while (work_to_do);
}
}
static int
@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* by doing it via the workqueue, some locking is eliminated
*/
ioc->fc_rescan_work_count = 1;
queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
flush_workqueue(ioc->fc_rescan_work_q);
@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
case MPI_EVENT_RESCAN:
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
if (ioc->fc_rescan_work_count++ == 0) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
break;
@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
mptfc_SetFcPortPage1_defaults(ioc);
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
if (ioc->fc_rescan_work_count++ == 0) {
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
queue_work(ioc->fc_rescan_work_q,
&ioc->fc_rescan_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
}


@ -130,11 +130,13 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
if (ctrl & NAND_CTRL_CHANGE) {
unsigned long bits;
bits = (~ctrl & NAND_NCE) << 2;
bits |= (ctrl & NAND_CLE) << 7;
bits |= (ctrl & NAND_ALE) << 6;
bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
ams_delta_latch2_write(0xC2, bits);
ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
AMS_DELTA_LATCH2_NAND_ALE |
AMS_DELTA_LATCH2_NAND_NCE, bits);
}
if (cmd != NAND_CMD_NONE)


@ -1093,9 +1093,10 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = nand_do_read_ops(mtd, from, &chip->ops);
*retlen = chip->ops.retlen;
nand_release_device(mtd);
*retlen = chip->ops.retlen;
return ret;
}
@ -1691,9 +1692,10 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
ret = nand_do_write_ops(mtd, to, &chip->ops);
*retlen = chip->ops.retlen;
nand_release_device(mtd);
*retlen = chip->ops.retlen;
return ret;
}


@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM
config HOTPLUG_PCI_ACPI
tristate "ACPI PCI Hotplug driver"
depends on ACPI_DOCK && HOTPLUG_PCI
depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI)
help
Say Y here if you have a system that supports PCI Hotplug using
ACPI.


@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot)
int cpci_configure_slot(struct slot* slot)
{
unsigned char busnr;
struct pci_bus *child;
struct pci_bus *parent;
int fn;
dbg("%s - enter", __FUNCTION__);
@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot)
*/
n = pci_scan_slot(slot->bus, slot->devfn);
dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
if (n > 0)
pci_bus_add_devices(slot->bus);
slot->dev = pci_get_slot(slot->bus, slot->devfn);
if (slot->dev == NULL) {
err("Could not find PCI device for slot %02x", slot->number);
return 1;
return -ENODEV;
}
}
parent = slot->dev->bus;
if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr);
child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr);
pci_do_scan_bus(child);
pci_bus_size_bridges(child);
for (fn = 0; fn < 8; fn++) {
struct pci_dev *dev;
dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
if (!dev)
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
/* Find an unused bus number for the new bridge */
struct pci_bus *child;
unsigned char busnr, start = parent->secondary;
unsigned char end = parent->subordinate;
for (busnr = start; busnr <= end; busnr++) {
if (!pci_find_bus(pci_domain_nr(parent),
busnr))
break;
}
if (busnr >= end) {
err("No free bus for hot-added bridge\n");
pci_dev_put(dev);
continue;
}
child = pci_add_new_bus(parent, dev, busnr);
if (!child) {
err("Cannot add new bus for %s\n",
pci_name(dev));
pci_dev_put(dev);
continue;
}
child->subordinate = pci_do_scan_bus(child);
pci_bus_size_bridges(child);
}
pci_dev_put(dev);
}
pci_bus_assign_resources(slot->dev->bus);
pci_bus_assign_resources(parent);
pci_bus_add_devices(parent);
pci_enable_bridges(parent);
dbg("%s - exit", __FUNCTION__);
return 0;


@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
/**
* pci_match_device - Tell if a PCI device structure has a matching
* PCI device id structure
* @ids: array of PCI device id structures to search in
* @dev: the PCI device structure to match against
* @drv: the PCI driver to match against
* @dev: the PCI device structure to match against
*
* Used by a driver to check whether a PCI device present in the
* system is in its list of supported devices. Returns the matching


@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
pci_read_config_dword(dev, 0x48, &region);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi );
/*
@ -1091,7 +1092,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );
static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
@ -1518,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command;
u32 bar;
u8 __iomem *csr;
u8 cmd_hi;
switch (dev->device) {
/* PCI IDs taken from drivers/net/e100.c */
case 0x1029:
case 0x1030 ... 0x1034:
case 0x1038 ... 0x103E:
case 0x1050 ... 0x1057:
case 0x1059:
case 0x1064 ... 0x106B:
case 0x1091 ... 0x1095:
case 0x1209:
case 0x1229:
case 0x2449:
case 0x2459:
case 0x245D:
case 0x27DC:
break;
default:
return;
}
/*
* Some firmware hands off the e100 with interrupts enabled,
* which can cause a flood of interrupts if packets are
* received before the driver attaches to the device. So
* disable all e100 interrupts here. The driver will
* re-enable them when it's ready.
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
if (!(command & PCI_COMMAND_MEMORY) || !bar)
return;
csr = ioremap(bar, 8);
if (!csr) {
printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
pci_name(dev));
return;
}
cmd_hi = readb(csr + 3);
if (cmd_hi == 0) {
printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts "
"enabled, disabling\n", pci_name(dev));
writeb(1, csr + 3);
}
iounmap(csr);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
{


@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to)
pr_debug("%s: aie=%d\n", __FUNCTION__, to);
tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
if (to)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, S3C2410_RTCALM);
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
}
static void s3c_rtc_setpie(int to)
@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to)
pr_debug("%s: pie=%d\n", __FUNCTION__, to);
spin_lock_irq(&s3c_rtc_pie_lock);
tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
if (to)
tmp |= S3C2410_TICNT_ENABLE;
writeb(tmp, S3C2410_TICNT);
writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
}
@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq)
unsigned int tmp;
spin_lock_irq(&s3c_rtc_pie_lock);
tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
s3c_rtc_freq = freq;
tmp |= (128 / freq)-1;
writeb(tmp, S3C2410_TICNT);
writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
}
@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq)
static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
unsigned int have_retried = 0;
void __iomem *base = s3c_rtc_base;
retry_get_time:
rtc_tm->tm_min = readb(S3C2410_RTCMIN);
rtc_tm->tm_hour = readb(S3C2410_RTCHOUR);
rtc_tm->tm_mday = readb(S3C2410_RTCDATE);
rtc_tm->tm_mon = readb(S3C2410_RTCMON);
rtc_tm->tm_year = readb(S3C2410_RTCYEAR);
rtc_tm->tm_sec = readb(S3C2410_RTCSEC);
rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE);
rtc_tm->tm_mon = readb(base + S3C2410_RTCMON);
rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
/* the only way to work out whether the system was mid-update
* when we read it is to check the second counter, and if it
@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
{
/* the rtc gets round the y2k problem by just not supporting it */
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
if (tm->tm_year < 100)
pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* we get around y2k by simply not supporting it */
if (year < 0 || year >= 100) {
dev_err(dev, "rtc only supports 100 years\n");
return -EINVAL;
}
writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC);
writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN);
writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR);
writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE);
writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON);
writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR);
writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN);
writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR);
writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE);
writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON);
writeb(BIN2BCD(year), base + S3C2410_RTCYEAR);
return 0;
}
@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *alm_tm = &alrm->time;
void __iomem *base = s3c_rtc_base;
unsigned int alm_en;
alm_tm->tm_sec = readb(S3C2410_ALMSEC);
alm_tm->tm_min = readb(S3C2410_ALMMIN);
alm_tm->tm_hour = readb(S3C2410_ALMHOUR);
alm_tm->tm_mon = readb(S3C2410_ALMMON);
alm_tm->tm_mday = readb(S3C2410_ALMDATE);
alm_tm->tm_year = readb(S3C2410_ALMYEAR);
alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
alm_tm->tm_mon = readb(base + S3C2410_ALMMON);
alm_tm->tm_mday = readb(base + S3C2410_ALMDATE);
alm_tm->tm_year = readb(base + S3C2410_ALMYEAR);
alm_en = readb(S3C2410_RTCALM);
alm_en = readb(base + S3C2410_RTCALM);
pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
alm_en,
@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *tm = &alrm->time;
void __iomem *base = s3c_rtc_base;
unsigned int alrm_en;
pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, S3C2410_RTCALM);
alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, base + S3C2410_RTCALM);
if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
alrm_en |= S3C2410_RTCALM_SECEN;
writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC);
writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC);
}
if (tm->tm_min < 60 && tm->tm_min >= 0) {
alrm_en |= S3C2410_RTCALM_MINEN;
writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN);
writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN);
}
if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
alrm_en |= S3C2410_RTCALM_HOUREN;
writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR);
writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR);
}
pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
writeb(alrm_en, S3C2410_RTCALM);
writeb(alrm_en, base + S3C2410_RTCALM);
if (0) {
alrm_en = readb(S3C2410_RTCALM);
alrm_en = readb(base + S3C2410_RTCALM);
alrm_en &= ~S3C2410_RTCALM_ALMEN;
writeb(alrm_en, S3C2410_RTCALM);
writeb(alrm_en, base + S3C2410_RTCALM);
disable_irq_wake(s3c_rtc_alarmno);
}
@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev,
static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned int rtcalm = readb(S3C2410_RTCALM);
unsigned int ticnt = readb (S3C2410_TICNT);
unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
seq_printf(seq, "alarm_IRQ\t: %s\n",
(rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = {
static void s3c_rtc_enable(struct platform_device *pdev, int en)
{
void __iomem *base = s3c_rtc_base;
unsigned int tmp;
if (s3c_rtc_base == NULL)
return;
if (!en) {
tmp = readb(S3C2410_RTCCON);
writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON);
tmp = readb(base + S3C2410_RTCCON);
writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
tmp = readb(S3C2410_TICNT);
writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT);
tmp = readb(base + S3C2410_TICNT);
writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
} else {
/* re-enable the device, and check it is ok */
if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
tmp = readb(S3C2410_RTCCON);
writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON);
tmp = readb(base + S3C2410_RTCCON);
writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
}
if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
tmp = readb(S3C2410_RTCCON);
writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON);
tmp = readb(base + S3C2410_RTCCON);
writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
}
if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
tmp = readb(S3C2410_RTCCON);
writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON);
tmp = readb(base + S3C2410_RTCCON);
writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
}
}
}
@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
}
s3c_rtc_mem = request_mem_region(res->start,
res->end-res->start+1,
pdev->name);
res->end-res->start+1,
pdev->name);
if (s3c_rtc_mem == NULL) {
dev_err(&pdev->dev, "failed to reserve memory region\n");
@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON));
pr_debug("s3c2410_rtc: RTCCON=%02x\n",
readb(s3c_rtc_base + S3C2410_RTCCON));
s3c_rtc_setfreq(s3c_rtc_freq);
@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
/* save TICNT for anyone using periodic interrupts */
ticnt_save = readb(S3C2410_TICNT);
ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
/* calculate time delta for suspend */
@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
rtc_tm_to_time(&tm, &time.tv_sec);
restore_time_delta(&s3c_rtc_delta, &time);
writeb(ticnt_save, S3C2410_TICNT);
writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
return 0;
}
#else


@ -54,11 +54,11 @@ struct dasd_devmap {
*/
struct dasd_server_ssid_map {
struct list_head list;
struct server_id {
struct system_id {
char vendor[4];
char serial[15];
__u16 ssid;
} sid;
__u16 ssid;
};
static struct list_head dasd_server_ssid_list;
@ -904,14 +904,14 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
return -ENOMEM;
strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
srv->ssid = uid->ssid;
srv->sid.ssid = uid->ssid;
/* server is already contained ? */
spin_lock(&dasd_devmap_lock);
devmap->uid = *uid;
list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
if (!memcmp(&srv->sid, &tmp->sid,
sizeof(struct dasd_server_ssid_map))) {
sizeof(struct system_id))) {
kfree(srv);
srv = NULL;
break;


@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device)
* Validate storage server of current device.
*/
static int
dasd_eckd_validate_server(struct dasd_device *device)
dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
{
int rc;
@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device)
return 0;
rc = dasd_eckd_psf_ssc(device);
if (rc)
/* maybe the requested feature is not available on the server,
* therefore just report the error and go ahead */
DEV_MESSAGE(KERN_INFO, device,
"Perform Subsystem Function returned rc=%d", rc);
/* maybe the requested feature is not available on the server,
* therefore just report the error and go ahead */
DEV_MESSAGE(KERN_INFO, device,
"PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
uid->vendor, uid->serial, uid->ssid, rc);
/* RE-Read Configuration Data */
return dasd_eckd_read_conf(device);
}
@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
return rc;
rc = dasd_set_uid(device->cdev, &uid);
if (rc == 1) /* new server found */
rc = dasd_eckd_validate_server(device);
rc = dasd_eckd_validate_server(device, &uid);
if (rc)
return rc;


@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
printk("\n");
}
/****************************************************************/
/****** Functions to handle the request ID hash table ********/
/****************************************************************/
#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
{
int i;
adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
GFP_KERNEL);
if (!adapter->req_list)
return -ENOMEM;
for (i=0; i<REQUEST_LIST_SIZE; i++)
INIT_LIST_HEAD(&adapter->req_list[i]);
return 0;
}
static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
{
struct zfcp_fsf_req *request, *tmp;
unsigned int i;
for (i=0; i<REQUEST_LIST_SIZE; i++) {
if (list_empty(&adapter->req_list[i]))
continue;
list_for_each_entry_safe(request, tmp,
&adapter->req_list[i], list)
list_del(&request->list);
}
kfree(adapter->req_list);
}
void zfcp_reqlist_add(struct zfcp_adapter *adapter,
struct zfcp_fsf_req *fsf_req)
{
unsigned int i;
i = fsf_req->req_id % REQUEST_LIST_SIZE;
list_add_tail(&fsf_req->list, &adapter->req_list[i]);
}
void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
{
struct zfcp_fsf_req *request, *tmp;
unsigned int i, counter;
u64 dbg_tmp[2];
i = req_id % REQUEST_LIST_SIZE;
BUG_ON(list_empty(&adapter->req_list[i]));
counter = 0;
list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
if (request->req_id == req_id) {
dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
dbg_tmp[1] = (u64) counter;
debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
list_del(&request->list);
break;
}
counter++;
}
}
struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
unsigned long req_id)
{
struct zfcp_fsf_req *request, *tmp;
unsigned int i;
i = req_id % REQUEST_LIST_SIZE;
list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
if (request->req_id == req_id)
return request;
return NULL;
}
int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
{
unsigned int i;
for (i=0; i<REQUEST_LIST_SIZE; i++)
if (!list_empty(&adapter->req_list[i]))
return 0;
return 1;
}
#undef ZFCP_LOG_AREA
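/*
 * Minimal usage sketch, not part of the patch: how the new request-ID
 * hash list is meant to be used around a request's lifetime.  The
 * function name below and the exact locking shown are illustrative
 * assumptions; only zfcp_reqlist_add/remove/ismember and the
 * req_list_lock/req_no/req_id fields come from this change.
 */
#if 0
static void zfcp_reqlist_usage_sketch(struct zfcp_adapter *adapter,
				      struct zfcp_fsf_req *fsf_req)
{
	unsigned long flags;

	/* submission path: assign a unique id and hash the request */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req->req_id = adapter->req_no++;
	zfcp_reqlist_add(adapter, fsf_req);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	/* completion path: find the request by id and unhash it */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	if (zfcp_reqlist_ismember(adapter, fsf_req->req_id))
		zfcp_reqlist_remove(adapter, fsf_req->req_id);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
}
#endif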
/****************************************************************/
/************** Uncategorised Functions *************************/
/****************************************************************/
@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_LIST_HEAD(&adapter->port_remove_lh);
/* initialize list of fsf requests */
spin_lock_init(&adapter->fsf_req_list_lock);
INIT_LIST_HEAD(&adapter->fsf_req_list_head);
spin_lock_init(&adapter->req_list_lock);
retval = zfcp_reqlist_init(adapter);
if (retval) {
ZFCP_LOG_INFO("request list initialization failed\n");
goto failed_low_mem_buffers;
}
/* initialize debug locks */
@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
* !0 - struct zfcp_adapter data structure could not be removed
* (e.g. still used)
* locks: adapter list write lock is assumed to be held by caller
* adapter->fsf_req_list_lock is taken and released within this
* function and must not be held on entry
*/
void
zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
/* sanity check: no pending FSF requests */
spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
retval = !list_empty(&adapter->fsf_req_list_head);
spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
if (retval) {
spin_lock_irqsave(&adapter->req_list_lock, flags);
retval = zfcp_reqlist_isempty(adapter);
spin_unlock_irqrestore(&adapter->req_list_lock, flags);
if (!retval) {
ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
"%i requests outstanding\n",
zfcp_get_busid_by_adapter(adapter), adapter,
atomic_read(&adapter->fsf_reqs_active));
atomic_read(&adapter->reqs_active));
retval = -EBUSY;
goto out;
}
@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
zfcp_free_low_mem_buffers(adapter);
/* free memory of adapter data structure and queues */
zfcp_qdio_free_queues(adapter);
zfcp_reqlist_free(adapter);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
ZFCP_LOG_TRACE("freeing adapter structure\n");


@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
retval = zfcp_adapter_scsi_register(adapter);
if (retval)
goto out_scsi_register;
/* initialize request counter */
BUG_ON(!zfcp_reqlist_isempty(adapter));
adapter->req_no = 0;
zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);


@ -52,7 +52,7 @@
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
#define ZFCP_VERSION "4.7.0"
#define ZFCP_VERSION "4.8.0"
/**
* zfcp_sg_to_address - determine kernel address from struct scatterlist
@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
#define REQUEST_LIST_SIZE 128
/********************* SCSI SPECIFIC DEFINES *********************************/
#define ZFCP_SCSI_ER_TIMEOUT (100*HZ)
#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
@ -886,11 +886,11 @@ struct zfcp_adapter {
struct list_head port_remove_lh; /* head of ports to be
removed */
u32 ports; /* number of remote ports */
struct timer_list scsi_er_timer; /* SCSI err recovery watch */
struct list_head fsf_req_list_head; /* head of FSF req list */
spinlock_t fsf_req_list_lock; /* lock for ops on list of
FSF requests */
atomic_t fsf_reqs_active; /* # active FSF reqs */
struct timer_list scsi_er_timer; /* SCSI err recovery watch */
atomic_t reqs_active; /* # active FSF reqs */
unsigned long req_no; /* unique FSF req number */
struct list_head *req_list; /* list of pending reqs */
spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue request_queue; /* request queue */
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
@ -986,6 +986,7 @@ struct zfcp_unit {
/* FSF request */
struct zfcp_fsf_req {
struct list_head list; /* list of FSF requests */
unsigned long req_id; /* unique request ID */
struct zfcp_adapter *adapter; /* adapter request belongs to */
u8 sbal_number; /* nr of SBALs free for use */
u8 sbal_first; /* first SBAL for this request */


@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
static int zfcp_erp_action_dismiss_port(struct zfcp_port *);
static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
struct zfcp_port *, struct zfcp_unit *);
@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
zfcp_erp_adapter_reopen(adapter, 0);
}
/*
* function: zfcp_fsf_scsi_er_timeout_handler
/**
* zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
*
* purpose: This function needs to be called whenever a SCSI error recovery
* action (abort/reset) does not return.
* Re-opening the adapter means that the command can be returned
* by zfcp (it is guaranteed that it does not return via the
* adapter anymore). The buffer can then be used again.
*
* returns: sod all
* This function needs to be called whenever a SCSI error recovery
* action (abort/reset) does not return. Re-opening the adapter means
* that the abort/reset command can be returned by zfcp. It won't complete
* via the adapter anymore (because qdio queues are closed). If ERP is
* already running on this adapter it will be stopped.
*/
void
zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
unsigned long flags;
ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
"Restarting all operations on the adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
zfcp_erp_adapter_reopen(adapter, 0);
return;
write_lock_irqsave(&adapter->erp_lock, flags);
if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
&adapter->status)) {
zfcp_erp_modify_adapter_status(adapter,
ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
ZFCP_CLEAR);
zfcp_erp_action_dismiss_adapter(adapter);
write_unlock_irqrestore(&adapter->erp_lock, flags);
/* dismiss all pending requests including requests for ERP */
zfcp_fsf_req_dismiss_all(adapter);
adapter->fsf_req_seq_no = 0;
} else
write_unlock_irqrestore(&adapter->erp_lock, flags);
zfcp_erp_adapter_reopen(adapter, 0);
}
/*
@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
return retval;
}
/*
* function:
*
* purpose: disable I/O,
* return any open requests and clean them up,
* aim: no pending and incoming I/O
*
* returns:
/**
* zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
*/
static void
zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
{
debug_text_event(adapter->erp_dbf, 6, "a_bl");
zfcp_erp_modify_adapter_status(adapter,
@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
clear_mask, ZFCP_CLEAR);
}
/*
* function:
*
* purpose: enable I/O
*
* returns:
/**
* zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
*/
static void
zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
debug_text_event(adapter->erp_dbf, 6, "a_ubl");
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
struct zfcp_adapter *adapter = erp_action->adapter;
if (erp_action->fsf_req) {
/* take lock to ensure that request is not being deleted meanwhile */
spin_lock(&adapter->fsf_req_list_lock);
/* check whether fsf req does still exist */
list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
if (fsf_req == erp_action->fsf_req)
break;
if (fsf_req && (fsf_req->erp_action == erp_action)) {
/* take lock to ensure that request is not deleted meanwhile */
spin_lock(&adapter->req_list_lock);
if ((!zfcp_reqlist_ismember(adapter,
erp_action->fsf_req->req_id)) &&
(fsf_req->erp_action == erp_action)) {
/* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &fsf_req,
sizeof (unsigned long));
/* dismiss fsf_req of timed out or dismissed erp_action */
/* dismiss fsf_req of timed out/dismissed erp_action */
if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
debug_text_event(adapter->erp_dbf, 3,
@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
*/
erp_action->fsf_req = NULL;
}
spin_unlock(&adapter->fsf_req_list_lock);
spin_unlock(&adapter->req_list_lock);
} else
debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
return retval;
}
/*
* purpose: generic handler for asynchronous events related to erp_action events
* (normal completion, time-out, dismissing, retry after
* low memory condition)
/**
* zfcp_erp_async_handler_nolock - complete erp_action
*
* note: deletion of timer is not required (e.g. in case of a time-out),
* but a second try does no harm,
* we leave it in here to allow for greater simplification
*
* returns: 0 - there was an action to handle
* !0 - otherwise
* Used for normal completion, time-out, dismissal and failure after
* low memory condition.
*/
static int
zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
unsigned long set_mask)
static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
unsigned long set_mask)
{
int retval;
struct zfcp_adapter *adapter = erp_action->adapter;
if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
del_timer(&erp_action->timer);
erp_action->status |= set_mask;
zfcp_erp_action_ready(erp_action);
retval = 0;
} else {
/* action is ready or gone - nothing to do */
debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
debug_event(adapter->erp_dbf, 3, &erp_action->action,
sizeof (int));
retval = 1;
}
return retval;
}
/*
* purpose: generic handler for asynchronous events related to erp_action
* events (normal completion, time-out, dismissing, retry after
* low memory condition)
*
* note: deletion of timer is not required (e.g. in case of a time-out),
* but a second try does no harm,
* we leave it in here to allow for greater simplification
*
* returns: 0 - there was an action to handle
* !0 - otherwise
/**
* zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
*/
int
zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
unsigned long set_mask)
void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
unsigned long set_mask)
{
struct zfcp_adapter *adapter = erp_action->adapter;
unsigned long flags;
int retval;
write_lock_irqsave(&adapter->erp_lock, flags);
retval = zfcp_erp_async_handler_nolock(erp_action, set_mask);
zfcp_erp_async_handler_nolock(erp_action, set_mask);
write_unlock_irqrestore(&adapter->erp_lock, flags);
return retval;
}
/*
@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
}
/*
* purpose: is called for an erp_action which needs to be ended
* though not being done,
* this is usually required if a higher-priority action is generated,
* action gets an appropriate flag and will be processed
* accordingly
/**
* zfcp_erp_action_dismiss - dismiss an erp_action
*
* locks: erp_lock held (thus we need to call another handler variant)
* adapter->erp_lock must be held
*
* Dismissal of an erp_action is usually required if an erp_action of
* higher priority is generated.
*/
static int
zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
return 0;
}
int
@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
return retval;
}
/*
* function: zfcp_qdio_cleanup
*
* purpose: cleans up QDIO operation for the specified adapter
*
* returns: 0 - successful cleanup
* !0 - failed cleanup
/**
* zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
*/
int
static void
zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
{
int retval = ZFCP_ERP_SUCCEEDED;
int first_used;
int used_count;
struct zfcp_adapter *adapter = erp_action->adapter;
@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
"queues on adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
retval = ZFCP_ERP_FAILED;
goto out;
return;
}
/*
* Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
* do_QDIO won't be called while qdio_shutdown is in progress.
*/
write_lock_irq(&adapter->request_queue.queue_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
write_unlock_irq(&adapter->request_queue.queue_lock);
@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
adapter->request_queue.free_index = 0;
atomic_set(&adapter->request_queue.free_count, 0);
adapter->request_queue.distance_from_int = 0;
out:
return retval;
}
static int
@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
"%s)\n", zfcp_get_busid_by_adapter(adapter));
ret = ZFCP_ERP_FAILED;
}
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) {
ZFCP_LOG_INFO("error: exchange port data failed (adapter "
/* don't treat as error for the sake of compatibility */
if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
"%s\n", zfcp_get_busid_by_adapter(adapter));
ret = ZFCP_ERP_FAILED;
}
return ret;
}
@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
return retval;
}
/*
* function: zfcp_fsf_cleanup
*
* purpose: cleanup FSF operation for specified adapter
*
* returns: 0 - FSF operation successfully cleaned up
* !0 - failed to cleanup FSF operation for this adapter
/**
* zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
*/
static int
static void
zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
{
int retval = ZFCP_ERP_SUCCEEDED;
struct zfcp_adapter *adapter = erp_action->adapter;
/*
@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
/* all ports and units are closed */
zfcp_erp_modify_adapter_status(adapter,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
return retval;
}
/*
@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
}
static int
zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
{
int retval = 0;
struct zfcp_port *port;
debug_text_event(adapter->erp_dbf, 5, "a_actab");
@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
else
list_for_each_entry(port, &adapter->port_list_head, list)
zfcp_erp_action_dismiss_port(port);
return retval;
}
static int
zfcp_erp_action_dismiss_port(struct zfcp_port *port)
static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
{
int retval = 0;
struct zfcp_unit *unit;
struct zfcp_adapter *adapter = port->adapter;
@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
else
list_for_each_entry(unit, &port->unit_list_head, list)
zfcp_erp_action_dismiss_unit(unit);
return retval;
}
static int
zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
{
int retval = 0;
struct zfcp_adapter *adapter = unit->port->adapter;
debug_text_event(adapter->erp_dbf, 5, "u_actab");
debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
zfcp_erp_action_dismiss(&unit->erp_action);
return retval;
}
static inline void


@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
struct zfcp_fsf_req *);
extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
(struct zfcp_fsf_req *, int, int);
@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
extern int zfcp_erp_wait(struct zfcp_adapter *);
extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
extern int zfcp_test_link(struct zfcp_port *);
@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
struct zfcp_fsf_req *);
extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
struct scsi_cmnd *);
extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
unsigned long);
extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
#endif /* ZFCP_EXT_H */
