
Merge with git+ssh://master.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Jody McIntyre 2006-03-28 20:24:39 -05:00
commit c0e4077c94
780 changed files with 23046 additions and 7839 deletions


@ -1127,8 +1127,10 @@ S: Carnegie, Pennsylvania 15106-4304
S: USA
N: Philip Gladstone
E: philip@raptor.com
E: philip@gladstonefamily.net
D: Kernel / timekeeping stuff
S: Carlisle, MA 01741
S: USA
N: Jan-Benedict Glaw
E: jbglaw@lug-owl.de
@ -3741,10 +3743,11 @@ D: Mylex DAC960 PCI RAID driver
D: Miscellaneous kernel fixes
N: Alessandro Zummo
E: azummo@ita.flashnet.it
W: http://freepage.logicom.it/azummo/
E: a.zummo@towertech.it
D: CMI8330 support in sb_card.c
D: ISAPnP fixes in sb_card.c
D: ZyXEL omni.net lcd plus driver
D: RTC subsystem
S: Italy
N: Marc Zyngier


@ -199,6 +199,8 @@ address during PCI bus mastering you might do something like:
"mydev: 24-bit DMA addressing not available.\n");
goto ignore_this_device;
}
[Better use DMA_24BIT_MASK instead of 0x00ffffff.
See include/linux/dma-mapping.h for reference.]
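As a minimal sketch of the pattern this note recommends (the mydev names are hypothetical, not from the document):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Ask for 24-bit addressing via the named constant rather
	 * than the raw 0x00ffffff literal. */
	if (pci_set_dma_mask(pdev, DMA_24BIT_MASK)) {
		printk(KERN_WARNING
		       "mydev: 24-bit DMA addressing not available.\n");
		return -ENODEV;
	}
	/* ... continue with normal device setup ... */
	return 0;
}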
When pci_set_dma_mask() is successful, and returns zero, the PCI layer
saves away this mask you have provided. The PCI layer will use this


@ -605,7 +605,7 @@ are the same as those shown in the preceding section, so they are omitted.
{
int cpu;
for_each_cpu(cpu)
for_each_possible_cpu(cpu)
run_on(cpu);
}


@ -26,7 +26,7 @@ Installing a bootloader
A couple of bootloaders able to boot Linux on Assabet are available:
BLOB (http://www.lart.tudelft.nl/lartware/blob/)
BLOB (http://www.lartmaker.nl/lartware/blob/)
BLOB is a bootloader used within the LART project. Some contributed
patches were merged into BLOB to add support for Assabet.


@ -11,4 +11,4 @@ is under development, with plenty of others in different stages of
planning.
The hardware designs for this board have been released under an open license;
see the LART page at http://www.lart.tudelft.nl/ for more information.
see the LART page at http://www.lartmaker.nl/ for more information.


@ -132,8 +132,18 @@ Some new queue property settings:
limit. No highmem default.
blk_queue_max_sectors(q, max_sectors)
Maximum size request you can handle in units of 512 byte
sectors. 255 default.
Sets two variables that limit the size of the request.
- The request queue's max_sectors, which is a soft size in
units of 512 byte sectors, and could be dynamically varied
by the core kernel.
- The request queue's max_hw_sectors, which is a hard limit
and reflects the maximum size request a driver can handle
in units of 512 byte sectors.
The default for both max_sectors and max_hw_sectors is
255. The upper limit of max_sectors is 1024.
blk_queue_max_phys_segments(q, max_segments)
Maximum physical segments you can handle in a request. 128
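A sketch of a driver applying the limits described above (the mydev name is hypothetical; the values shown are the documented defaults):

#include <linux/blkdev.h>

static void mydev_setup_queue(request_queue_t *q)
{
	/* One call sets both limits described above: max_sectors
	 * (soft, may be varied by the core) and max_hw_sectors
	 * (hard), in units of 512 byte sectors. */
	blk_queue_max_sectors(q, 255);

	/* Scatter-gather limit for the same queue. */
	blk_queue_max_phys_segments(q, 128);
}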


@ -97,13 +97,13 @@ at which time hotplug is disabled.
You really don't need to manipulate any of the system cpu maps. They should
be read-only for most use. When setting up per-cpu resources almost always use
cpu_possible_map/for_each_cpu() to iterate.
cpu_possible_map/for_each_possible_cpu() to iterate.
Never use anything other than cpumask_t to represent bitmap of CPUs.
#include <linux/cpumask.h>
for_each_cpu - Iterate over cpu_possible_map
for_each_possible_cpu - Iterate over cpu_possible_map
for_each_online_cpu - Iterate over cpu_online_map
for_each_present_cpu - Iterate over cpu_present_map
for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
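For example, a minimal sketch of per-cpu setup using the renamed iterator (the mydrv names are hypothetical):

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, mydrv_count);

static void mydrv_init_counters(void)
{
	int cpu;

	/* Per-cpu state must cover every cpu that could ever come
	 * online, so iterate cpu_possible_map, not cpu_online_map. */
	for_each_possible_cpu(cpu)
		per_cpu(mydrv_count, cpu) = 0;
}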


@ -1,5 +1,5 @@
Export cpu topology info by sysfs. Items (attributes) are similar
Export cpu topology info via sysfs. Items (attributes) are similar
to /proc/cpuinfo.
1) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
@ -12,7 +12,7 @@ represent the thread siblings to cpu X in the same core;
represent the thread siblings to cpu X in the same physical package;
To implement it in an architecture-neutral way, a new source file,
driver/base/topology.c, is to export the 5 attributes.
drivers/base/topology.c, is to export the 4 attributes.
If one architecture wants to support this feature, it just needs to
implement 4 defines, typically in file include/asm-XXX/topology.h.
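As a sketch of what a port supplies (the right-hand sides mirror the i386-style per-cpu data and are assumptions, not part of this document):

/* include/asm-XXX/topology.h */
#define topology_physical_package_id(cpu)	(cpu_data[cpu].phys_proc_id)
#define topology_core_id(cpu)			(cpu_data[cpu].cpu_core_id)
#define topology_thread_siblings(cpu)		(cpu_sibling_map[cpu])
#define topology_core_siblings(cpu)		(cpu_core_map[cpu])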


@ -1,27 +1,47 @@
00-INDEX
- this file (info on some of the filesystems supported by linux).
Exporting
- explanation of how to make filesystems exportable.
Locking
- info on locking rules as they pertain to Linux VFS.
adfs.txt
- info and mount options for the Acorn Advanced Disc Filing System.
afs.txt
- info and examples for the distributed AFS (Andrew File System) fs.
affs.txt
- info and mount options for the Amiga Fast File System.
automount-support.txt
- information about filesystem automount support.
befs.txt
- information about the BeOS filesystem for Linux.
bfs.txt
- info for the SCO UnixWare Boot Filesystem (BFS).
cifs.txt
- description of the CIFS filesystem
- description of the CIFS filesystem.
coda.txt
- description of the CODA filesystem.
configfs/
- directory containing configfs documentation and example code.
cramfs.txt
- info on the cram filesystem for small storage (ROMs etc)
- info on the cram filesystem for small storage (ROMs etc).
dentry-locking.txt
- info on the RCU-based dcache locking model.
devfs/
- directory containing devfs documentation.
directory-locking
- info about the locking scheme used for directory operations.
dlmfs.txt
- info on the userspace interface to the OCFS2 DLM.
ext2.txt
- info, mount options and specifications for the Ext2 filesystem.
ext3.txt
- info, mount options and specifications for the Ext3 filesystem.
files.txt
- info on file management in the Linux kernel.
fuse.txt
- info on the Filesystem in User SpacE including mount options.
hfs.txt
- info on the Macintosh HFS Filesystem for Linux.
hpfs.txt
- info and mount options for the OS/2 HPFS.
isofs.txt
@ -32,23 +52,43 @@ ncpfs.txt
- info on Novell Netware(tm) filesystem using NCP protocol.
ntfs.txt
- info and mount options for the NTFS filesystem (Windows NT).
proc.txt
- info on Linux's /proc filesystem.
ocfs2.txt
- info and mount options for the OCFS2 clustered filesystem.
porting
- various information on filesystem porting.
proc.txt
- info on Linux's /proc filesystem.
ramfs-rootfs-initramfs.txt
- info on the 'in memory' filesystems ramfs, rootfs and initramfs.
reiser4.txt
- info on the Reiser4 filesystem based on dancing tree algorithms.
relayfs.txt
- info on relayfs, for efficient streaming from kernel to user space.
romfs.txt
- Description of the ROMFS filesystem.
- description of the ROMFS filesystem.
smbfs.txt
- info on using filesystems with the SMB protocol (Windows 3.11 and NT)
- info on using filesystems with the SMB protocol (Win 3.11 and NT).
spufs.txt
- info and mount options for the SPU filesystem used on Cell.
sysfs-pci.txt
- info on accessing PCI device resources through sysfs.
sysfs.txt
- info on sysfs, a ram-based filesystem for exporting kernel objects.
sysv-fs.txt
- info on the SystemV/V7/Xenix/Coherent filesystem.
tmpfs.txt
- info on tmpfs, a filesystem that holds all files in virtual memory.
udf.txt
- info and mount options for the UDF filesystem.
ufs.txt
- info on the ufs filesystem.
v9fs.txt
- v9fs is a Unix implementation of the Plan 9 9p remote fs protocol.
vfat.txt
- info on using the VFAT filesystem used in Windows NT and Windows 95
vfs.txt
- Overview of the Virtual File System
- overview of the Virtual File System
xfs.txt
- info and mount options for the XFS filesystem.
xip.txt
- info on execute-in-place for file mappings.


@ -78,8 +78,6 @@ Code Seq# Include File Comments
'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem
'1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl
<ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
'6' 00-10 <asm-i386/processor.h> Intel IA32 microcode update driver
<mailto:tigran@veritas.com>
'8' all SNP8023 advanced NIC card
<mailto:mcr@solidum.com>
'A' 00-1F linux/apm_bios.h


@ -29,7 +29,7 @@ address is written to $4a, then the whole Byte is written to
$48, while it doesn't matter how often you're writing to $4a
as long as $48 is not touched. After $48 has been written,
the whole card disappears from $e8 and is mapped to the new
address just written. Make shure $4a is written before $48,
address just written. Make sure $4a is written before $48,
otherwise your chance is only 1:16 to find the board :-).
The local memory-map is even active when mapped to $e8:


@ -87,7 +87,7 @@
* would fail and generate an error message in the system log.
* - For opt_c: slave should not be set to the master's setting
* while it is running. It was already set during enslave. To
* simplify things, it is now handeled separately.
* simplify things, it is now handled separately.
*
* - 2003/12/01 - Shmulik Hen <shmulik.hen at intel dot com>
* - Code cleanup and style changes


@ -115,6 +115,9 @@ pnp_unregister_protocol
pnp_register_driver
- adds a PnP driver to the Plug and Play Layer
- this includes driver model integration
- returns zero for success or a negative error number for failure; count
calls to the .add() method if you need to know how many devices bind to
the driver
pnp_unregister_driver
- removes a PnP driver from the Plug and Play Layer
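A minimal registration sketch following the note above (all mydrv names and the device ID are hypothetical; the .probe hook corresponds to the .add() method mentioned):

#include <linux/pnp.h>

static const struct pnp_device_id mydrv_ids[] = {
	{ .id = "ABC0001", .driver_data = 0 },	/* hypothetical ID */
	{ .id = "" }
};

static int mydrv_bound;		/* count binds here if you need the number */

static int mydrv_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	mydrv_bound++;
	return 0;
}

static struct pnp_driver mydrv_driver = {
	.name		= "mydrv",
	.id_table	= mydrv_ids,
	.probe		= mydrv_probe,
};

static int __init mydrv_init(void)
{
	int err = pnp_register_driver(&mydrv_driver);
	if (err < 0)
		return err;	/* negative errno: nothing registered */
	return 0;
}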


@ -0,0 +1,182 @@
Started by Paul Jackson <pj@sgi.com>
The robust futex ABI
--------------------
Robust_futexes provide a mechanism that is used in addition to normal
futexes, for kernel assist of cleanup of held locks on task exit.
The interesting data as to what futexes a thread is holding is kept on a
linked list in user space, where it can be updated efficiently as locks
are taken and dropped, without kernel intervention. The only additional
kernel intervention required for robust_futexes above and beyond what is
required for futexes is:
1) a one time call, per thread, to tell the kernel where its list of
held robust_futexes begins, and
2) internal kernel code at exit, to handle any listed locks held
by the exiting thread.
The existing normal futexes already provide a "Fast Userspace Locking"
mechanism, which handles uncontested locking without needing a system
call, and handles contested locking by maintaining a list of waiting
threads in the kernel. Options on the sys_futex(2) system call support
waiting on a particular futex, and waking up the next waiter on a
particular futex.
For robust_futexes to work, the user code (typically in a library such
as glibc linked with the application) has to manage and place the
necessary list elements exactly as the kernel expects them. If it fails
to do so, then improperly listed locks will not be cleaned up on exit,
probably causing deadlock or other such failure of the other threads
waiting on the same locks.
A thread that anticipates possibly using robust_futexes should first
issue the system call:
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head, size_t len);
The pointer 'head' points to a structure in the thread's address space
consisting of three words. Each word is 32 bits on 32 bit archs, or 64
bits on 64 bit archs, in local byte order. Each thread should have
its own thread private 'head'.
If a thread is running in 32 bit compatibility mode on a 64 bit native
arch kernel, then it can actually have two such structures - one using
32 bit words for 32 bit compatibility mode, and one using 64 bit words
for 64 bit native mode. The kernel, if it is a 64 bit kernel supporting
32 bit compatibility mode, will attempt to process both lists on each
task exit, if the corresponding sys_set_robust_list() call has been
made to set up that list.
The first word in the memory structure at 'head' contains a
pointer to a singly linked list of 'lock entries', one per lock,
as described below. If the list is empty, the pointer will point
to itself, 'head'. The last 'lock entry' points back to the 'head'.
The second word, called 'offset', specifies the offset, plus or minus,
from the address of the associated 'lock entry' to the address of what
will be called the 'lock word'. The 'lock word'
is always a 32 bit word, unlike the other words above. The 'lock
word' holds 3 flag bits in the upper 3 bits, and the thread id (TID)
of the thread holding the lock in the bottom 29 bits. See further
below for a description of the flag bits.
The third word, called 'list_op_pending', contains a transient copy of
the address of the 'lock entry', during list insertion and removal,
and is needed to correctly resolve races should a thread exit while
in the middle of a locking or unlocking operation.
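A user-space sketch of the registration step tying the three words together (the structures are declared locally to mirror the layout described above; SYS_set_robust_list assumes libc headers that know the syscall number, and note that glibc normally performs this call itself for each thread):

#include <stddef.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct robust_list { struct robust_list *next; };
struct robust_list_head {
	struct robust_list list;		/* first word: list pointer */
	long futex_offset;			/* second word: 'offset' */
	struct robust_list *list_op_pending;	/* third word */
};

static struct robust_list_head head;

int main(void)
{
	head.list.next = &head.list;	/* empty list points back to 'head' */
	head.futex_offset = sizeof(struct robust_list);	/* lock word follows each entry */
	head.list_op_pending = NULL;

	if (syscall(SYS_set_robust_list, &head, sizeof(head)) != 0) {
		perror("set_robust_list");
		return 1;
	}
	return 0;
}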
Each 'lock entry' on the singly linked list starting at 'head' consists
of just a single word, pointing to the next 'lock entry', or back to
'head' if there are no more entries. In addition, nearby to each 'lock
entry', at an offset from the 'lock entry' specified by the 'offset'
word, is one 'lock word'.
The 'lock word' is always 32 bits, and is intended to be the same 32 bit
lock variable used by the futex mechanism, in conjunction with
robust_futexes. The kernel will only be able to wake up the next thread
waiting for a lock on a thread's exit if that next thread used the futex
mechanism to register the address of that 'lock word' with the kernel.
For each futex lock currently held by a thread, if it wants this
robust_futex support for exit cleanup of that lock, it should have one
'lock entry' on this list, with its associated 'lock word' at the
specified 'offset'. Should a thread die while holding any such locks,
the kernel will walk this list, mark any such locks with a bit
indicating their holder died, and wake up the next thread waiting for
that lock using the futex mechanism.
When a thread has invoked the above system call to indicate it
anticipates using robust_futexes, the kernel stores the passed in 'head'
pointer for that task. The task may retrieve that value later on by
using the system call:
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
size_t __user *len_ptr);
It is anticipated that threads will use robust_futexes embedded in
larger, user level locking structures, one per lock. The kernel
robust_futex mechanism doesn't care what else is in that structure, so
long as the 'offset' to the 'lock word' is the same for all
robust_futexes used by that thread. The thread should link those locks
it currently holds using the 'lock entry' pointers. It may also have
other links between the locks, such as the reverse side of a double
linked list, but that doesn't matter to the kernel.
By keeping its locks linked this way, on a list starting with a 'head'
pointer known to the kernel, the kernel can provide to a thread the
essential service available for robust_futexes, which is to help clean
up locks held at the time of a (perhaps unexpected) exit.
Actual locking and unlocking, during normal operations, is handled
entirely by user level code in the contending threads, and by the
existing futex mechanism to wait for, and wake up, locks. The kernel's
only essential involvement in robust_futexes is to remember where the
list 'head' is, and to walk the list on thread exit, handling locks
still held by the departing thread, as described below.
There may exist thousands of futex lock structures in a thread's shared
memory, on various data structures, at a given point in time. Only those
lock structures for locks currently held by that thread should be on
that thread's robust_futex linked lock list at a given time.
A given futex lock structure in a user shared memory region may be held
at different times by any of the threads with access to that region. The
thread currently holding such a lock, if any, is marked with the thread's
TID in the lower 29 bits of the 'lock word'.
When adding or removing a lock from its list of held locks, in order for
the kernel to correctly handle lock cleanup regardless of when the task
exits (perhaps it gets an unexpected signal 9 in the middle of
manipulating this list), the user code must observe the following
protocol on 'lock entry' insertion and removal:
On insertion:
1) set the 'list_op_pending' word to the address of the 'lock word'
to be inserted,
2) acquire the futex lock,
3) add the lock entry, with its thread id (TID) in the bottom 29 bits
of the 'lock word', to the linked list starting at 'head', and
4) clear the 'list_op_pending' word.
On removal:
1) set the 'list_op_pending' word to the address of the 'lock word'
to be removed,
2) remove the lock entry for this lock from the 'head' list,
3) release the futex lock, and
4) clear the 'list_op_pending' word.
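As a sketch of the insertion protocol above, for a hypothetical lock whose 32-bit 'lock word' immediately follows its 'lock entry' (so 'offset' is sizeof(void *)); the contended FUTEX_WAIT path and the waiter bits are elided, and the gcc __sync builtin stands in for whatever atomic the real implementation uses:

struct robust_list { struct robust_list *next; };
struct robust_list_head {
	struct robust_list list;
	long futex_offset;
	struct robust_list *list_op_pending;
};

struct my_lock {
	struct robust_list entry;	/* the 'lock entry' */
	unsigned int futex;		/* the 32-bit 'lock word' */
};

static void my_lock_acquire(struct robust_list_head *head,
			    struct my_lock *lock, unsigned int tid)
{
	/* 1) advertise the pending operation */
	head->list_op_pending = &lock->entry;
	/* 2) acquire the futex: TID into the bottom bits, 0 -> tid */
	while (!__sync_bool_compare_and_swap(&lock->futex, 0, tid))
		;	/* contended: would sys_futex(FUTEX_WAIT) here */
	/* 3) link the entry at the front of the 'head' list */
	lock->entry.next = head->list.next;
	head->list.next = &lock->entry;
	/* 4) clear the pending marker */
	head->list_op_pending = NULL;
}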
On exit, the kernel will consider the address stored in
'list_op_pending' and the address of each 'lock word' found by walking
the list starting at 'head'. For each such address, if the bottom 29
bits of the 'lock word' at offset 'offset' from that address equals the
exiting thread's TID, then the kernel will do two things:
1) if bit 31 (0x80000000) is set in that word, then attempt a futex
wakeup on that address, which will wake the next thread that has
used the futex mechanism to wait on that address, and
2) atomically set bit 30 (0x40000000) in the 'lock word'.
In the above, bit 31 was set by futex waiters on that lock to indicate
they were waiting, and bit 30 is set by the kernel to indicate that the
lock owner died holding the lock.
The kernel exit code will silently stop scanning the list further if at
any point:
1) the 'head' pointer or a subsequent linked list pointer
is not a valid address of a user space word
2) the calculated location of the 'lock word' (address plus
'offset') is not the valid address of a 32 bit user space
word
3) the list contains more than 1 million (subject to
future kernel configuration changes) elements.
When the kernel sees a list entry whose 'lock word' doesn't have the
current thread's TID in the lower 29 bits, it does nothing with that
entry, and goes on to the next entry.
Bit 29 (0x20000000) of the 'lock word' is reserved for future use.


@ -0,0 +1,218 @@
Started by: Ingo Molnar <mingo@redhat.com>
Background
----------
What are robust futexes? To answer that, we first need to understand
what futexes are: normal futexes are special types of locks that in the
noncontended case can be acquired/released from userspace without having
to enter the kernel.
A futex is in essence a user-space address, e.g. a 32-bit lock variable
field. If userspace notices contention (the lock is already owned and
someone else wants to grab it too) then the lock is marked with a value
that says "there's a waiter pending", and the sys_futex(FUTEX_WAIT)
syscall is used to wait for the other guy to release it. The kernel
creates a 'futex queue' internally, so that it can later on match up the
waiter with the waker - without them having to know about each other.
When the owner thread releases the futex, it notices (via the variable
value) that there were waiter(s) pending, and does the
sys_futex(FUTEX_WAKE) syscall to wake them up. Once all waiters have
taken and released the lock, the futex is again back to 'uncontended'
state, and there's no in-kernel state associated with it. The kernel
completely forgets that there ever was a futex at that address. This
method makes futexes very lightweight and scalable.
"Robustness" is about dealing with crashes while holding a lock: if a
process exits prematurely while holding a pthread_mutex_t lock that is
also shared with some other process (e.g. yum segfaults while holding a
pthread_mutex_t, or yum is kill -9-ed), then waiters for that lock need
to be notified that the last owner of the lock exited in some irregular
way.
To solve such types of problems, "robust mutex" userspace APIs were
created: pthread_mutex_lock() returns an error value if the owner exits
prematurely - and the new owner can decide whether the data protected by
the lock can be recovered safely.
There is a big conceptual problem with futex based mutexes though: it is
the kernel that destroys the owner task (e.g. due to a SEGFAULT), but
the kernel cannot help with the cleanup: if there is no 'futex queue'
(and in most cases there is none, futexes being fast lightweight locks)
then the kernel has no information to clean up after the held lock!
Userspace has no chance to clean up after the lock either - userspace is
the one that crashes, so it has no opportunity to clean up. Catch-22.
In practice, when e.g. yum is kill -9-ed (or segfaults), a system reboot
is needed to release that futex based lock. This is one of the leading
bugreports against yum.
To solve this problem, the traditional approach was to extend the vma
(virtual memory area descriptor) concept to have a notion of 'pending
robust futexes attached to this area'. This approach requires 3 new
syscall variants to sys_futex(): FUTEX_REGISTER, FUTEX_DEREGISTER and
FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
they have a robust_head set. This approach has two fundamental problems
left:
- it has quite complex locking and race scenarios. The vma-based
approach had been pending for years, but it is still not completely
reliable.
- they have to scan _every_ vma at sys_exit() time, per thread!
The second disadvantage is a real killer: pthread_exit() takes around 1
microsecond on Linux, but with thousands (or tens of thousands) of vmas
every pthread_exit() takes a millisecond or more, also totally
destroying the CPU's L1 and L2 caches!
This is very much noticeable even for normal process sys_exit_group()
calls: the kernel has to do the vma scanning unconditionally! (this is
because the kernel has no knowledge about how many robust futexes there
are to be cleaned up, because a robust futex might have been registered
in another task, and the futex variable might have been simply mmap()-ed
into this process's address space).
This huge overhead forced the creation of CONFIG_FUTEX_ROBUST so that
normal kernels can turn it off, but worse than that: the overhead makes
robust futexes impractical for any type of generic Linux distribution.
So something had to be done.
New approach to robust futexes
------------------------------
At the heart of this new approach there is a per-thread private list of
robust locks that userspace is holding (maintained by glibc) - which
userspace list is registered with the kernel via a new syscall [this
registration happens at most once per thread lifetime]. At do_exit()
time, the kernel checks this user-space list: are there any robust futex
locks to be cleaned up?
In the common case, at do_exit() time, there is no list registered, so
the cost of robust futexes is just a simple current->robust_list != NULL
comparison. If the thread has registered a list, then normally the list
is empty. If the thread/process crashed or terminated in some incorrect
way then the list might be non-empty: in this case the kernel carefully
walks the list [not trusting it], and marks all locks that are owned by
this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if
any).
The list is guaranteed to be private and per-thread at do_exit() time,
so it can be accessed by the kernel in a lockless way.
There is one race possible though: since adding to and removing from the
list is done after the futex is acquired by glibc, there is a window of
a few instructions in which the thread (or process) could die, leaving
the futex hung. To protect against this possibility, userspace (glibc)
also maintains a simple per-thread 'list_op_pending' field, to allow the
kernel to clean up if the thread dies after acquiring the lock, but just
before it could have added itself to the list. Glibc sets this
list_op_pending field before it tries to acquire the futex, and clears
it after the list-add (or list-remove) has finished.
That's all that is needed - all the rest of robust-futex cleanup is done
in userspace [just like with the previous patches].
Ulrich Drepper has implemented the necessary glibc support for this new
mechanism, which fully enables robust mutexes.
Key differences of this userspace-list based approach, compared to the
vma based method:
- it's much, much faster: at thread exit time, there's no need to loop
over every vma (!), which the VM-based method has to do. Only a very
simple 'is the list empty' op is done.
- no VM changes are needed - 'struct address_space' is left alone.
- no registration of individual locks is needed: robust mutexes don't
need any extra per-lock syscalls. Robust mutexes thus become a very
lightweight primitive - so they don't force the application designer
to make a hard choice between performance and robustness - robust
mutexes are just as fast.
- no per-lock kernel allocation happens.
- no resource limits are needed.
- no kernel-space recovery call (FUTEX_RECOVER) is needed.
- the implementation and the locking is "obvious", and there are no
interactions with the VM.
Performance
-----------
I have benchmarked the time needed for the kernel to process a list of 1
million (!) held locks, using the new method [on a 2GHz CPU]:
- with FUTEX_WAIT set [contended mutex]: 130 msecs
- without FUTEX_WAIT set [uncontended mutex]: 30 msecs
I have also measured an approach where glibc does the lock notification
[which it currently does for !pshared robust mutexes], and that took 256
msecs - clearly slower, due to the 1 million FUTEX_WAKE syscalls
userspace had to do.
(1 million held locks are unheard of - we expect at most a handful of
locks to be held at a time. Nevertheless it's nice to know that this
approach scales nicely.)
Implementation details
----------------------
The patch adds two new syscalls: one to register the userspace list, and
one to query the registered list pointer:
asmlinkage long
sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
size_t __user *len_ptr);
List registration is very fast: the pointer is simply stored in
current->robust_list. [Note that in the future, if robust futexes become
widespread, we could extend sys_clone() to register a robust-list head
for new threads, without the need of another syscall.]
So there is virtually zero overhead for tasks not using robust futexes,
and even for robust futex users, there is only one extra syscall per
thread lifetime, and the cleanup operation, if it happens, is fast and
straightforward. The kernel doesn't have any internal distinction between
robust and normal futexes.
If a futex is found to be held at exit time, the kernel sets the
following bit of the futex word:
#define FUTEX_OWNER_DIED 0x40000000
and wakes up the next futex waiter (if any). User-space does the rest of
the cleanup.
Otherwise, robust futexes are acquired by glibc by putting the TID into
the futex field atomically. Waiters set the FUTEX_WAITERS bit:
#define FUTEX_WAITERS 0x80000000
and the remaining bits are for the TID.
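What this enables at the user level, sketched with the robust-mutex API (shown with the later standardized names; glibc initially exposed these as _NP variants):

#include <errno.h>
#include <pthread.h>

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t lock;
	int err;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&lock, &attr);

	err = pthread_mutex_lock(&lock);
	if (err == EOWNERDEAD) {
		/* The previous owner died holding the lock; the kernel
		 * set FUTEX_OWNER_DIED and woke us.  Repair the shared
		 * state, then mark the mutex usable again. */
		pthread_mutex_consistent(&lock);
	}
	/* ... critical section ... */
	pthread_mutex_unlock(&lock);
	return 0;
}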
Testing, architecture support
-----------------------------
I've tested the new syscalls on x86 and x86_64, and have made sure the
parsing of the userspace list is robust [ ;-) ] even if the list is
deliberately corrupted.
i386 and x86_64 syscalls are wired up at the moment, and Ulrich has
tested the new glibc code (on x86_64 and i386), and it works for his
robust-mutex testcases.
All other architectures should build just fine too - but they won't have
the new syscalls yet.
Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
inline function before wiring up the syscalls (that function returns
-ENOSYS right now).


@ -1,4 +1,4 @@
This document gives a brief introduction to the caching
This document gives a brief introduction to the caching
mechanisms in the sunrpc layer that is used, in particular,
for NFS authentication.
@ -25,25 +25,17 @@ The common code handles such things as:
- supporting 'NEGATIVE' as well as positive entries
- allowing an EXPIRED time on cache items, and removing
items after they expire, and are no longer in use.
Future code extensions are expect to handle
- making requests to user-space to fill in cache entries
- allowing user-space to directly set entries in the cache
- delaying RPC requests that depend on as-yet incomplete
cache entries, and replaying those requests when the cache entry
is complete.
- maintaining last-access times on cache entries
- clean out old entries when the caches become full
The code for performing a cache lookup is also common, but in the form
of a template. i.e. a #define.
Each cache defines a lookup function by using the DefineCacheLookup
macro, or the simpler DefineSimpleCacheLookup macro
- clean out old entries as they expire.
Creating a Cache
----------------
1/ A cache needs a datum to cache. This is in the form of a
1/ A cache needs a datum to store. This is in the form of a
structure definition that must contain a
struct cache_head
as an element, usually the first.
@ -51,35 +43,69 @@ Creating a Cache
Each cache element is reference counted and contains
expiry and update times for use in cache management.
2/ A cache needs a "cache_detail" structure that
describes the cache. This stores the hash table, and some
parameters for cache management.
3/ A cache needs a lookup function. This is created using
the DefineCacheLookup macro. This lookup function is used both
to find entries and to update entries. The normal mode for
updating an entry is to replace the old entry with a new
entry. However it is possible to allow update-in-place
for those caches where it makes sense (no atomicity issues
or indirect reference counting issue)
4/ A cache needs to be registered using cache_register(). This
includes in on a list of caches that will be regularly
cleaned to discard old data. For this to work, some
thread must periodically call cache_clean
describes the cache. This stores the hash table, some
parameters for cache management, and some operations detailing how
to work with particular cache items.
The operations required are:
struct cache_head *alloc(void)
This simply allocates appropriate memory and returns
a pointer to the cache_head embedded within the
structure
void cache_put(struct kref *)
This is called when the last reference to an item is
dropped. The pointer passed is to the 'ref' field
in the cache_head. cache_put should release any
references created by 'init' and, if CACHE_VALID
is set, any references created by 'update'.
It should then release the memory allocated by
'alloc'.
int match(struct cache_head *orig, struct cache_head *new)
test if the keys in the two structures match. Return
1 if they do, 0 if they don't.
void init(struct cache_head *orig, struct cache_head *new)
Set the 'key' fields in 'new' from 'orig'. This may
include taking references to shared objects.
void update(struct cache_head *orig, struct cache_head *new)
Set the 'content' fields in 'new' from 'orig'.
int cache_show(struct seq_file *m, struct cache_detail *cd,
struct cache_head *h)
Optional. Used to provide a /proc file that lists the
contents of a cache. This should show one item,
usually on just one line.
int cache_request(struct cache_detail *cd, struct cache_head *h,
char **bpp, int *blen)
Format a request to be sent to user-space for an item
to be instantiated. *bpp is a buffer of size *blen.
bpp should be moved forward over the encoded message,
and *blen should be reduced to show how much free
space remains. Return 0 on success or <0 if not
enough room or other problem.
int cache_parse(struct cache_detail *cd, char *buf, int len)
A message from user space has arrived to fill out a
cache entry. It is in 'buf' of length 'len'.
cache_parse should parse this, find the item in the
cache with sunrpc_cache_lookup, and update the item
with sunrpc_cache_update.
3/ A cache needs to be registered using cache_register(). This
includes it on a list of caches that will be regularly
cleaned to discard old data.
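A condensed sketch of a cache definition wiring up the operations above (the mycache names and the key/content fields are hypothetical; cache_request, cache_parse and cache_show are omitted for brevity, and the init/update argument order follows the description above):

#include <linux/sunrpc/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct mycache_entry {
	struct cache_head h;	/* must be the first element */
	__u32 addr;		/* key */
	int allowed;		/* content */
};

static struct cache_head *mycache_alloc(void)
{
	struct mycache_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);
	return e ? &e->h : NULL;
}

static void mycache_put(struct kref *ref)
{
	struct cache_head *h = container_of(ref, struct cache_head, ref);
	/* no shared references of our own to drop; just free */
	kfree(container_of(h, struct mycache_entry, h));
}

static int mycache_match(struct cache_head *a, struct cache_head *b)
{
	return container_of(a, struct mycache_entry, h)->addr ==
	       container_of(b, struct mycache_entry, h)->addr;
}

static void mycache_init(struct cache_head *orig, struct cache_head *new)
{
	/* copy the key fields */
	container_of(new, struct mycache_entry, h)->addr =
		container_of(orig, struct mycache_entry, h)->addr;
}

static void mycache_update(struct cache_head *orig, struct cache_head *new)
{
	/* copy the content fields */
	container_of(new, struct mycache_entry, h)->allowed =
		container_of(orig, struct mycache_entry, h)->allowed;
}

static struct cache_head *mycache_table[16];

static struct cache_detail mycache = {
	.hash_size	= 16,
	.hash_table	= mycache_table,
	.name		= "mycache",
	.cache_put	= mycache_put,
	.match		= mycache_match,
	.init		= mycache_init,
	.update		= mycache_update,
	.alloc		= mycache_alloc,
};
/* registered at module init with cache_register(&mycache); */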
Using a cache
-------------
To find a value in a cache, call the lookup function passing it a the
datum which contains key, and possibly content, and a flag saying
whether to update the cache with new data from the datum. Depending
on how the cache lookup function was defined, it may take an extra
argument to identify the particular cache in question.
To find a value in a cache, call sunrpc_cache_lookup passing a pointer
to the cache_head in a sample item with the 'key' fields filled in.
This will be passed to ->match to identify the target entry. If no
entry is found, a new entry will be created, added to the cache, and
marked as not containing valid data.
Except in cases of kmalloc failure, the lookup function
will return a new datum which will store the key and
may contain valid content, or may not.
This datum is typically passed to cache_check which determines the
validity of the datum and may later initiate an upcall to fill
in the data.
The item returned is typically passed to cache_check which will check
if the data is valid, and may initiate an up-call to get fresh data.
cache_check will return -ENOENT if the entry is negative or if an
upcall is needed but not possible, -EAGAIN if an upcall is pending,
or 0 if the data is valid.
cache_check can be passed a "struct cache_req *". This structure is
typically embedded in the actual request and can be used to create a
@ -90,6 +116,13 @@ item does become valid, the deferred copy of the request will be
revisited (->revisit). It is expected that this method will
reschedule the request for processing.
The value returned by sunrpc_cache_lookup can also be passed to
sunrpc_cache_update to set the content for the item. A second item is
passed which should hold the content. If the item found by _lookup
has valid data, then it is discarded and a new item is created. This
saves any user of an item from worrying about content changing while
it is being inspected. If the item found by _lookup does not contain
valid data, then the content is copied across and CACHE_VALID is set.
Populating a cache
------------------
@ -114,8 +147,8 @@ should be create or updated to have the given content, and the
expiry time should be set on that item.
Reading from a channel is a bit more interesting. When a cache
lookup fail, or when it suceeds but finds an entry that may soon
expiry, a request is lodged for that cache item to be updated by
lookup fails, or when it succeeds but finds an entry that may soon
expire, a request is lodged for that cache item to be updated by
user-space. These requests appear in the channel file.
Successive reads will return successive requests.
@ -130,7 +163,7 @@ Thus a user-space helper is likely to:
write a response
loop.
If it dies and needs to be restarted, any requests that have not be
If it dies and needs to be restarted, any requests that have not been
answered will still appear in the file and will be read by the new
instance of the helper.
@ -142,10 +175,9 @@ Each cache should also define a "cache_request" method which
takes a cache item and encodes a request into the buffer
provided.
Note: If a cache has no active readers on the channel, and has had no
active readers for more than 60 seconds, further requests will not be
added to the channel but instead all looks that do not find a valid
added to the channel but instead all lookups that do not find a valid
entry will fail. This is partly for backward compatibility: The
previous nfs exports table was deemed to be authoritative and a
failed lookup meant a definite 'no'.
@ -154,18 +186,17 @@ request/response format
-----------------------
While each cache is free to use its own format for requests
and responses over channel, the following is recommended are
and responses over channel, the following is recommended as
appropriate and support routines are available to help:
Each request or response record should be printable ASCII
with precisely one newline character which should be at the end.
Fields within the record should be separated by spaces, normally one.
If spaces, newlines, or nul characters are needed in a field they
much be quotes. two mechanisms are available:
must be quoted. Two mechanisms are available:
1/ If a field begins '\x' then it must contain an even number of
hex digits, and pairs of these digits provide the bytes in the
field.
2/ otherwise a \ in the field must be followed by 3 octal digits
which give the code for a byte. Other characters are treated
as them selves. At the very least, space, newlines nul, and
as themselves. At the very least, space, newline, nul, and
'\' must be quoted in this way.
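For example, a field containing "a b" (one embedded space) can be sent
either as \x612062 using the hex form, or as a\040b using the octal
form.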


@ -2233,6 +2233,12 @@ M: p_gortmaker@yahoo.com
L: linux-kernel@vger.kernel.org
S: Maintained
REAL TIME CLOCK (RTC) SUBSYSTEM
P: Alessandro Zummo
M: a.zummo@towertech.it
L: linux-kernel@vger.kernel.org
S: Maintained
REISERFS FILE SYSTEM
P: Hans Reiser
M: reiserfs-dev@namesys.com


@ -34,6 +34,7 @@
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/eisa.h>
#include <linux/pfn.h>
#ifdef CONFIG_MAGIC_SYSRQ
#include <linux/sysrq.h>
#include <linux/reboot.h>
@ -42,7 +43,7 @@
#include <asm/setup.h>
#include <asm/io.h>
extern struct notifier_block *panic_notifier_list;
extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
alpha_panic_event,
@ -241,9 +242,6 @@ reserve_std_resources(void)
request_resource(io, standard_io_resources+i);
}
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
#define PFN_MAX PFN_DOWN(0x80000000)
#define for_each_mem_cluster(memdesc, cluster, i) \
for ((cluster) = (memdesc)->cluster, (i) = 0; \
@ -472,11 +470,6 @@ page_is_ram(unsigned long pfn)
return 0;
}
#undef PFN_UP
#undef PFN_DOWN
#undef PFN_PHYS
#undef PFN_MAX
void __init
setup_arch(char **cmdline_p)
{
@ -507,7 +500,8 @@ setup_arch(char **cmdline_p)
}
/* Register a call for panic conditions. */
notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
atomic_notifier_chain_register(&panic_notifier_list,
&alpha_panic_block);
#ifdef CONFIG_ALPHA_GENERIC
/* Assume that we've booted from SRM if we haven't booted from MILO.


@ -314,10 +314,11 @@ time_init(void)
if (!est_cycle_freq)
est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());
cc1 = rpcc_after_update_in_progress();
cc1 = rpcc();
/* Calibrate CPU clock -- attempt #2. */
if (!est_cycle_freq) {
cc1 = rpcc_after_update_in_progress();
cc2 = rpcc_after_update_in_progress();
est_cycle_freq = validate_cc_value(cc2 - cc1);
cc1 = cc2;


@ -13,6 +13,7 @@
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/pfn.h>
#include <asm/hwrpb.h>
#include <asm/pgalloc.h>
@ -27,9 +28,6 @@ bootmem_data_t node_bdata[MAX_NUMNODES];
#define DBGDCONT(args...)
#endif
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
#define for_each_mem_cluster(memdesc, cluster, i) \
for ((cluster) = (memdesc)->cluster, (i) = 0; \
(i) < (memdesc)->numclusters; (i)++, (cluster)++)


@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
config ARM
bool
default y
select RTC_LIB
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
@ -152,6 +153,12 @@ config ARCH_IXP2000
help
Support for Intel's IXP2400/2800 (XScale) family of processors.
config ARCH_IXP23XX
bool "IXP23XX-based"
select PCI
help
Support for Intel's IXP23xx (XScale) family of processors.
config ARCH_L7200
bool "LinkUp-L7200"
select FIQ
@ -273,6 +280,8 @@ source "arch/arm/mach-ixp4xx/Kconfig"
source "arch/arm/mach-ixp2000/Kconfig"
source "arch/arm/mach-ixp23xx/Kconfig"
source "arch/arm/mach-pxa/Kconfig"
source "arch/arm/mach-sa1100/Kconfig"
@ -791,7 +800,8 @@ source "drivers/acorn/block/Kconfig"
if PCMCIA || ARCH_CLPS7500 || ARCH_IOP3XX || ARCH_IXP4XX \
|| ARCH_L7200 || ARCH_LH7A40X || ARCH_PXA || ARCH_RPC \
|| ARCH_S3C2410 || ARCH_SA1100 || ARCH_SHARK || FOOTBRIDGE
|| ARCH_S3C2410 || ARCH_SA1100 || ARCH_SHARK || FOOTBRIDGE \
|| ARCH_IXP23XX
source "drivers/ide/Kconfig"
endif
@ -839,6 +849,8 @@ source "drivers/usb/Kconfig"
source "drivers/mmc/Kconfig"
source "drivers/rtc/Kconfig"
endmenu
source "fs/Kconfig"


@ -57,6 +57,7 @@ tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
ifeq ($(CONFIG_AEABI),y)
@ -97,6 +98,7 @@ endif
machine-$(CONFIG_ARCH_IOP3XX) := iop3xx
machine-$(CONFIG_ARCH_IXP4XX) := ixp4xx
machine-$(CONFIG_ARCH_IXP2000) := ixp2000
machine-$(CONFIG_ARCH_IXP23XX) := ixp23xx
machine-$(CONFIG_ARCH_OMAP1) := omap1
machine-$(CONFIG_ARCH_OMAP2) := omap2
incdir-$(CONFIG_ARCH_OMAP) := omap


@ -50,10 +50,6 @@ ifeq ($(CONFIG_ARCH_AT91RM9200),y)
OBJS += head-at91rm9200.o
endif
ifeq ($(CONFIG_DEBUG_ICEDCC),y)
OBJS += ice-dcc.o
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
OBJS += big-endian.o
endif


@ -1,17 +0,0 @@
.text
.global icedcc_putc
icedcc_putc:
mov r2, #0x4000000
1:
subs r2, r2, #1
movlt pc, r14
mrc p14, 0, r1, c0, c0, 0
tst r1, #2
bne 1b
mcr p14, 0, r0, c1, c0, 0
mov pc, r14


@ -20,24 +20,45 @@ unsigned int __machine_arch_type;
#include <linux/string.h>
#include <asm/arch/uncompress.h>
#ifdef STANDALONE_DEBUG
#define putstr printf
#endif
#else
static void putstr(const char *ptr);
#include <linux/compiler.h>
#include <asm/arch/uncompress.h>
#ifdef CONFIG_DEBUG_ICEDCC
#define putstr icedcc_putstr
#define putc icedcc_putc
extern void icedcc_putc(int ch);
static void
icedcc_putstr(const char *ptr)
static void icedcc_putc(int ch)
{
for (; *ptr != '\0'; ptr++) {
icedcc_putc(*ptr);
int status, i = 0x4000000;
do {
if (--i < 0)
return;
asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (status));
} while (status & 2);
asm("mcr p15, 0, %0, c1, c0, 0" : : "r" (ch));
}
#define putc(ch) icedcc_putc(ch)
#define flush() do { } while (0)
#endif
static void putstr(const char *ptr)
{
char c;
while ((c = *ptr++) != '\0') {
if (c == '\n')
putc('\r');
putc(c);
}
flush();
}
#endif


@ -20,6 +20,7 @@
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/rtc.h>
#include <asm/rtc.h>
#include <asm/semaphore.h>
@ -42,89 +43,6 @@ static struct rtc_ops *rtc_ops;
#define rtc_epoch 1900UL
static const unsigned char days_in_month[] = {
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
#define LEAPS_THRU_END_OF(y) ((y)/4 - (y)/100 + (y)/400)
#define LEAP_YEAR(year) ((!(year % 4) && (year % 100)) || !(year % 400))
static int month_days(unsigned int month, unsigned int year)
{
return days_in_month[month] + (LEAP_YEAR(year) && month == 1);
}
/*
* Convert seconds since 01-01-1970 00:00:00 to Gregorian date.
*/
void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
{
int days, month, year;
days = time / 86400;
time -= days * 86400;
tm->tm_wday = (days + 4) % 7;
year = 1970 + days / 365;
days -= (year - 1970) * 365
+ LEAPS_THRU_END_OF(year - 1)
- LEAPS_THRU_END_OF(1970 - 1);
if (days < 0) {
year -= 1;
days += 365 + LEAP_YEAR(year);
}
tm->tm_year = year - 1900;
tm->tm_yday = days + 1;
for (month = 0; month < 11; month++) {
int newdays;
newdays = days - month_days(month, year);
if (newdays < 0)
break;
days = newdays;
}
tm->tm_mon = month;
tm->tm_mday = days + 1;
tm->tm_hour = time / 3600;
time -= tm->tm_hour * 3600;
tm->tm_min = time / 60;
tm->tm_sec = time - tm->tm_min * 60;
}
EXPORT_SYMBOL(rtc_time_to_tm);
/*
* Does the rtc_time represent a valid date/time?
*/
int rtc_valid_tm(struct rtc_time *tm)
{
if (tm->tm_year < 70 ||
tm->tm_mon >= 12 ||
tm->tm_mday < 1 ||
tm->tm_mday > month_days(tm->tm_mon, tm->tm_year + 1900) ||
tm->tm_hour >= 24 ||
tm->tm_min >= 60 ||
tm->tm_sec >= 60)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(rtc_valid_tm);
/*
* Convert Gregorian date to seconds since 01-01-1970 00:00:00.
*/
int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
{
*time = mktime(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
return 0;
}
EXPORT_SYMBOL(rtc_tm_to_time);
/*
* Calculate the next alarm time given the requested alarm time mask
* and the current time.
@ -151,13 +69,13 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc
}
}
static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm)
static inline int rtc_arm_read_time(struct rtc_ops *ops, struct rtc_time *tm)
{
memset(tm, 0, sizeof(struct rtc_time));
return ops->read_time(tm);
}
static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm)
static inline int rtc_arm_set_time(struct rtc_ops *ops, struct rtc_time *tm)
{
int ret;
@ -168,7 +86,7 @@ static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm)
return ret;
}
static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
static inline int rtc_arm_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
{
int ret = -EINVAL;
if (ops->read_alarm) {
@ -178,7 +96,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
return ret;
}
static inline int rtc_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
static inline int rtc_arm_set_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
{
int ret = -EINVAL;
if (ops->set_alarm)
@ -266,7 +184,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
switch (cmd) {
case RTC_ALM_READ:
ret = rtc_read_alarm(ops, &alrm);
ret = rtc_arm_read_alarm(ops, &alrm);
if (ret)
break;
ret = copy_to_user(uarg, &alrm.time, sizeof(tm));
@ -288,11 +206,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
alrm.time.tm_wday = -1;
alrm.time.tm_yday = -1;
alrm.time.tm_isdst = -1;
ret = rtc_set_alarm(ops, &alrm);
ret = rtc_arm_set_alarm(ops, &alrm);
break;
case RTC_RD_TIME:
ret = rtc_read_time(ops, &tm);
ret = rtc_arm_read_time(ops, &tm);
if (ret)
break;
ret = copy_to_user(uarg, &tm, sizeof(tm));
@ -310,7 +228,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
ret = -EFAULT;
break;
}
ret = rtc_set_time(ops, &tm);
ret = rtc_arm_set_time(ops, &tm);
break;
case RTC_EPOCH_SET:
@ -341,11 +259,11 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
ret = -EFAULT;
break;
}
ret = rtc_set_alarm(ops, &alrm);
ret = rtc_arm_set_alarm(ops, &alrm);
break;
case RTC_WKALM_RD:
ret = rtc_read_alarm(ops, &alrm);
ret = rtc_arm_read_alarm(ops, &alrm);
if (ret)
break;
ret = copy_to_user(uarg, &alrm, sizeof(alrm));
@ -435,7 +353,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo
struct rtc_time tm;
char *p = page;
if (rtc_read_time(ops, &tm) == 0) {
if (rtc_arm_read_time(ops, &tm) == 0) {
p += sprintf(p,
"rtc_time\t: %02d:%02d:%02d\n"
"rtc_date\t: %04d-%02d-%02d\n"
@ -445,7 +363,7 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo
rtc_epoch);
}
if (rtc_read_alarm(ops, &alrm) == 0) {
if (rtc_arm_read_alarm(ops, &alrm) == 0) {
p += sprintf(p, "alrm_time\t: ");
if ((unsigned int)alrm.time.tm_hour <= 24)
p += sprintf(p, "%02d:", alrm.time.tm_hour);

File diff suppressed because it is too large.


@ -18,7 +18,7 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($CONFIG_PREEMPT,y)
ifeq ($(CONFIG_PREEMPT),y)
lib-y += copy_from_user.o copy_to_user.o
else
ifneq ($(CONFIG_CPU_32v3),y)


@ -236,7 +236,7 @@
/*
* Abort preanble and completion macros.
* Abort preamble and completion macros.
* If a fixup handler is required then those macros must surround it.
* It is assumed that the fixup code will handle the private part of
* the exit macro.


@ -30,7 +30,9 @@
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/delay.h>
#include <linux/termios.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <asm/types.h>
#include <asm/setup.h>
@ -360,6 +362,68 @@ void __init ep93xx_init_irq(void)
/*************************************************************************
* EP93xx peripheral handling
*************************************************************************/
#define EP93XX_UART_MCR_OFFSET (0x0100)
static void ep93xx_uart_set_mctrl(struct amba_device *dev,
void __iomem *base, unsigned int mctrl)
{
unsigned int mcr;
mcr = 0;
if (!(mctrl & TIOCM_RTS))
mcr |= 2;
if (!(mctrl & TIOCM_DTR))
mcr |= 1;
__raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
}
static struct amba_pl010_data ep93xx_uart_data = {
.set_mctrl = ep93xx_uart_set_mctrl,
};
static struct amba_device uart1_device = {
.dev = {
.bus_id = "apb:uart1",
.platform_data = &ep93xx_uart_data,
},
.res = {
.start = EP93XX_UART1_PHYS_BASE,
.end = EP93XX_UART1_PHYS_BASE + 0x0fff,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_EP93XX_UART1, NO_IRQ },
.periphid = 0x00041010,
};
static struct amba_device uart2_device = {
.dev = {
.bus_id = "apb:uart2",
.platform_data = &ep93xx_uart_data,
},
.res = {
.start = EP93XX_UART2_PHYS_BASE,
.end = EP93XX_UART2_PHYS_BASE + 0x0fff,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_EP93XX_UART2, NO_IRQ },
.periphid = 0x00041010,
};
static struct amba_device uart3_device = {
.dev = {
.bus_id = "apb:uart3",
.platform_data = &ep93xx_uart_data,
},
.res = {
.start = EP93XX_UART3_PHYS_BASE,
.end = EP93XX_UART3_PHYS_BASE + 0x0fff,
.flags = IORESOURCE_MEM,
},
.irq = { IRQ_EP93XX_UART3, NO_IRQ },
.periphid = 0x00041010,
};
void __init ep93xx_init_devices(void)
{
unsigned int v;
@ -371,4 +435,8 @@ void __init ep93xx_init_devices(void)
v &= ~EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
__raw_writel(v, EP93XX_SYSCON_DEVICE_CONFIG);
amba_device_register(&uart1_device, &iomem_resource);
amba_device_register(&uart2_device, &iomem_resource);
amba_device_register(&uart3_device, &iomem_resource);
}

View File

@ -34,27 +34,12 @@ static int rtc_base;
static unsigned long __init get_isa_cmos_time(void)
{
unsigned int year, mon, day, hour, min, sec;
int i;
// check to see if the RTC makes sense.....
if ((CMOS_READ(RTC_VALID) & RTC_VRT) == 0)
return mktime(1970, 1, 1, 0, 0, 0);
/* The Linux interpretation of the CMOS clock register contents:
* When the Update-In-Progress (UIP) flag goes from 1 to 0, the
* RTC registers show the second which has precisely just started.
* Let's hope other operating systems interpret the RTC the same way.
*/
/* read RTC exactly on falling edge of update flag */
for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
break;
for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */
if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
break;
do { /* Isn't this overkill ? UIP above should guarantee consistency */
do {
sec = CMOS_READ(RTC_SECONDS);
min = CMOS_READ(RTC_MINUTES);
hour = CMOS_READ(RTC_HOURS);


@ -15,7 +15,9 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/termios.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <asm/hardware.h>
#include <asm/irq.h>
@ -28,6 +30,8 @@
#include "common.h"
static struct amba_pl010_data integrator_uart_data;
static struct amba_device rtc_device = {
.dev = {
.bus_id = "mb:15",
@ -44,6 +48,7 @@ static struct amba_device rtc_device = {
static struct amba_device uart0_device = {
.dev = {
.bus_id = "mb:16",
.platform_data = &integrator_uart_data,
},
.res = {
.start = INTEGRATOR_UART0_BASE,
@ -57,6 +62,7 @@ static struct amba_device uart0_device = {
static struct amba_device uart1_device = {
.dev = {
.bus_id = "mb:17",
.platform_data = &integrator_uart_data,
},
.res = {
.start = INTEGRATOR_UART1_BASE,
@ -115,6 +121,46 @@ static int __init integrator_init(void)
arch_initcall(integrator_init);
/*
* On the Integrator platform, the port RTS and DTR are provided by
* bits in the following SC_CTRLS register bits:
* RTS DTR
* UART0 7 6
* UART1 5 4
*/
#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
static void integrator_uart_set_mctrl(struct amba_device *dev, void __iomem *base, unsigned int mctrl)
{
unsigned int ctrls = 0, ctrlc = 0, rts_mask, dtr_mask;
if (dev == &uart0_device) {
rts_mask = 1 << 4;
dtr_mask = 1 << 5;
} else {
rts_mask = 1 << 6;
dtr_mask = 1 << 7;
}
if (mctrl & TIOCM_RTS)
ctrlc |= rts_mask;
else
ctrls |= rts_mask;
if (mctrl & TIOCM_DTR)
ctrlc |= dtr_mask;
else
ctrls |= dtr_mask;
__raw_writel(ctrls, SC_CTRLS);
__raw_writel(ctrlc, SC_CTRLC);
}
static struct amba_pl010_data integrator_uart_data = {
.set_mctrl = integrator_uart_set_mctrl,
};
#define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_BASE) + INTEGRATOR_HDR_CTRL_OFFSET
static DEFINE_SPINLOCK(cm_lock);


@ -40,13 +40,13 @@ static int integrator_set_rtc(void)
return 1;
}
static int rtc_read_alarm(struct rtc_wkalrm *alrm)
static int integrator_rtc_read_alarm(struct rtc_wkalrm *alrm)
{
rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time);
return 0;
}
static inline int rtc_set_alarm(struct rtc_wkalrm *alrm)
static inline int integrator_rtc_set_alarm(struct rtc_wkalrm *alrm)
{
unsigned long time;
int ret;
@ -62,7 +62,7 @@ static inline int rtc_set_alarm(struct rtc_wkalrm *alrm)
return ret;
}
static int rtc_read_time(struct rtc_time *tm)
static int integrator_rtc_read_time(struct rtc_time *tm)
{
rtc_time_to_tm(readl(rtc_base + RTC_DR), tm);
return 0;
@ -76,7 +76,7 @@ static int rtc_read_time(struct rtc_time *tm)
* edge of the 1Hz clock, we must write the time one second
* in advance.
*/
static inline int rtc_set_time(struct rtc_time *tm)
static inline int integrator_rtc_set_time(struct rtc_time *tm)
{
unsigned long time;
int ret;
@ -90,10 +90,10 @@ static inline int rtc_set_time(struct rtc_time *tm)
static struct rtc_ops rtc_ops = {
.owner = THIS_MODULE,
.read_time = rtc_read_time,
.set_time = rtc_set_time,
.read_alarm = rtc_read_alarm,
.set_alarm = rtc_set_alarm,
.read_time = integrator_rtc_read_time,
.set_time = integrator_rtc_set_time,
.read_alarm = integrator_rtc_read_alarm,
.set_alarm = integrator_rtc_set_alarm,
};
static irqreturn_t arm_rtc_interrupt(int irq, void *dev_id,


@ -0,0 +1,25 @@
if ARCH_IXP23XX
config ARCH_SUPPORTS_BIG_ENDIAN
bool
default y
menu "Intel IXP23xx Implementation Options"
comment "IXP23xx Platforms"
config MACH_ESPRESSO
bool "Support IP Fabrics Double Espresso platform"
help
config MACH_IXDP2351
bool "Support Intel IXDP2351 platform"
help
config MACH_ROADRUNNER
bool "Support ADI RoadRunner platform"
help
endmenu
endif


@ -0,0 +1,11 @@
#
# Makefile for the linux kernel.
#
obj-y := core.o pci.o
obj-m :=
obj-n :=
obj- :=
obj-$(CONFIG_MACH_ESPRESSO) += espresso.o
obj-$(CONFIG_MACH_IXDP2351) += ixdp2351.o
obj-$(CONFIG_MACH_ROADRUNNER) += roadrunner.o


@ -0,0 +1,2 @@
zreladdr-y := 0x00008000
params_phys-y := 0x00000100


@ -0,0 +1,431 @@
/*
* arch/arm/mach-ixp23xx/core.c
*
* Core routines for IXP23xx chips
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Based on 2.4 code Copyright 2004 (c) Intel Corporation
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/bitops.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/mach/arch.h>
/*************************************************************************
* Chip specific mappings shared by all IXP23xx systems
*************************************************************************/
static struct map_desc ixp23xx_io_desc[] __initdata = {
{ /* XSI-CPP CSRs */
.virtual = IXP23XX_XSI2CPP_CSR_VIRT,
.pfn = __phys_to_pfn(IXP23XX_XSI2CPP_CSR_PHYS),
.length = IXP23XX_XSI2CPP_CSR_SIZE,
.type = MT_DEVICE,
}, { /* Expansion Bus Config */
.virtual = IXP23XX_EXP_CFG_VIRT,
.pfn = __phys_to_pfn(IXP23XX_EXP_CFG_PHYS),
.length = IXP23XX_EXP_CFG_SIZE,
.type = MT_DEVICE,
}, { /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACS,.... */
.virtual = IXP23XX_PERIPHERAL_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PERIPHERAL_PHYS),
.length = IXP23XX_PERIPHERAL_SIZE,
.type = MT_DEVICE,
}, { /* CAP CSRs */
.virtual = IXP23XX_CAP_CSR_VIRT,
.pfn = __phys_to_pfn(IXP23XX_CAP_CSR_PHYS),
.length = IXP23XX_CAP_CSR_SIZE,
.type = MT_DEVICE,
}, { /* MSF CSRs */
.virtual = IXP23XX_MSF_CSR_VIRT,
.pfn = __phys_to_pfn(IXP23XX_MSF_CSR_PHYS),
.length = IXP23XX_MSF_CSR_SIZE,
.type = MT_DEVICE,
}, { /* PCI I/O Space */
.virtual = IXP23XX_PCI_IO_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_IO_PHYS),
.length = IXP23XX_PCI_IO_SIZE,
.type = MT_DEVICE,
}, { /* PCI Config Space */
.virtual = IXP23XX_PCI_CFG_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_CFG_PHYS),
.length = IXP23XX_PCI_CFG_SIZE,
.type = MT_DEVICE,
}, { /* PCI local CFG CSRs */
.virtual = IXP23XX_PCI_CREG_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_CREG_PHYS),
.length = IXP23XX_PCI_CREG_SIZE,
.type = MT_DEVICE,
}, { /* PCI MEM Space */
.virtual = IXP23XX_PCI_MEM_VIRT,
.pfn = __phys_to_pfn(IXP23XX_PCI_MEM_PHYS),
.length = IXP23XX_PCI_MEM_SIZE,
.type = MT_DEVICE,
}
};
void __init ixp23xx_map_io(void)
{
iotable_init(ixp23xx_io_desc, ARRAY_SIZE(ixp23xx_io_desc));
}
/***************************************************************************
* IXP23xx Interrupt Handling
***************************************************************************/
enum ixp23xx_irq_type {
IXP23XX_IRQ_LEVEL, IXP23XX_IRQ_EDGE
};
static void ixp23xx_config_irq(unsigned int, enum ixp23xx_irq_type);
static int ixp23xx_irq_set_type(unsigned int irq, unsigned int type)
{
int line = irq - IRQ_IXP23XX_GPIO6 + 6;
u32 int_style;
enum ixp23xx_irq_type irq_type;
volatile u32 *int_reg;
/*
* Only GPIOs 6-15 are wired to interrupts on IXP23xx
*/
if (line < 6 || line > 15)
return -EINVAL;
switch (type) {
case IRQT_BOTHEDGE:
int_style = IXP23XX_GPIO_STYLE_TRANSITIONAL;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQT_RISING:
int_style = IXP23XX_GPIO_STYLE_RISING_EDGE;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQT_FALLING:
int_style = IXP23XX_GPIO_STYLE_FALLING_EDGE;
irq_type = IXP23XX_IRQ_EDGE;
break;
case IRQT_HIGH:
int_style = IXP23XX_GPIO_STYLE_ACTIVE_HIGH;
irq_type = IXP23XX_IRQ_LEVEL;
break;
case IRQT_LOW:
int_style = IXP23XX_GPIO_STYLE_ACTIVE_LOW;
irq_type = IXP23XX_IRQ_LEVEL;
break;
default:
return -EINVAL;
}
ixp23xx_config_irq(irq, irq_type);
if (line >= 8) { /* pins 8-15 */
line -= 8;
int_reg = (volatile u32 *)IXP23XX_GPIO_GPIT2R;
} else { /* pins 0-7 */
int_reg = (volatile u32 *)IXP23XX_GPIO_GPIT1R;
}
/*
* Clear pending interrupts
*/
*IXP23XX_GPIO_GPISR = (1 << line);
/* Clear the style for the appropriate pin */
*int_reg &= ~(IXP23XX_GPIO_STYLE_MASK <<
(line * IXP23XX_GPIO_STYLE_SIZE));
/* Set the new style */
*int_reg |= (int_style << (line * IXP23XX_GPIO_STYLE_SIZE));
return 0;
}
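/*
 * Illustrative sketch (editorial, not from the original source):
 * assuming IXP23XX_GPIO_STYLE_SIZE is 3, configuring GPIO 11 as
 * IRQT_FALLING gives line = 11, adjusted to 11 - 8 = 3 within GPIT2R,
 * so the write above clears the three style bits at offset 3 * 3 = 9
 * and then ORs in IXP23XX_GPIO_STYLE_FALLING_EDGE << 9.
 */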
static void ixp23xx_irq_mask(unsigned int irq)
{
volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg &= ~(1 << (irq % 32));
}
static void ixp23xx_irq_ack(unsigned int irq)
{
int line = irq - IRQ_IXP23XX_GPIO6 + 6;
if ((line < 6) || (line > 15))
return;
*IXP23XX_GPIO_GPISR = (1 << line);
}
/*
* Level triggered interrupts on GPIO lines can only be cleared when the
* interrupt condition disappears.
*/
static void ixp23xx_irq_level_unmask(unsigned int irq)
{
volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
ixp23xx_irq_ack(irq);
*intr_reg |= (1 << (irq % 32));
}
static void ixp23xx_irq_edge_unmask(unsigned int irq)
{
volatile unsigned long *intr_reg = IXP23XX_INTR_EN1 + (irq / 32);
*intr_reg |= (1 << (irq % 32));
}
static struct irqchip ixp23xx_irq_level_chip = {
.ack = ixp23xx_irq_mask,
.mask = ixp23xx_irq_mask,
.unmask = ixp23xx_irq_level_unmask,
.set_type = ixp23xx_irq_set_type
};
static struct irqchip ixp23xx_irq_edge_chip = {
.ack = ixp23xx_irq_ack,
.mask = ixp23xx_irq_mask,
.unmask = ixp23xx_irq_edge_unmask,
.set_type = ixp23xx_irq_set_type
};
static void ixp23xx_pci_irq_mask(unsigned int irq)
{
*IXP23XX_PCI_XSCALE_INT_ENABLE &= ~(1 << (IRQ_IXP23XX_INTA + 27 - irq));
}
static void ixp23xx_pci_irq_unmask(unsigned int irq)
{
*IXP23XX_PCI_XSCALE_INT_ENABLE |= (1 << (IRQ_IXP23XX_INTA + 27 - irq));
}
/*
* TODO: Should this just be done at ASM level?
*/
static void pci_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
u32 pci_interrupt;
unsigned int irqno;
struct irqdesc *int_desc;
pci_interrupt = *IXP23XX_PCI_XSCALE_INT_STATUS;
desc->chip->ack(irq);
/* See which PCI_INTA, or PCI_INTB interrupted */
if (pci_interrupt & (1 << 26)) {
irqno = IRQ_IXP23XX_INTB;
} else if (pci_interrupt & (1 << 27)) {
irqno = IRQ_IXP23XX_INTA;
} else {
BUG();
}
int_desc = irq_desc + irqno;
int_desc->handle(irqno, int_desc, regs);
desc->chip->unmask(irq);
}
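/*
 * Editorial note (illustrative): bits 27 and 26 of
 * PCI_XSCALE_INT_STATUS are assumed to correspond to INTA and INTB
 * respectively, which is also why the mask/unmask helpers above shift
 * by (IRQ_IXP23XX_INTA + 27 - irq): irq == INTA selects bit 27 and
 * irq == INTB selects bit 26.
 */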
static struct irqchip ixp23xx_pci_irq_chip = {
.ack = ixp23xx_pci_irq_mask,
.mask = ixp23xx_pci_irq_mask,
.unmask = ixp23xx_pci_irq_unmask
};
static void ixp23xx_config_irq(unsigned int irq, enum ixp23xx_irq_type type)
{
switch (type) {
case IXP23XX_IRQ_LEVEL:
set_irq_chip(irq, &ixp23xx_irq_level_chip);
set_irq_handler(irq, do_level_IRQ);
break;
case IXP23XX_IRQ_EDGE:
set_irq_chip(irq, &ixp23xx_irq_edge_chip);
set_irq_handler(irq, do_edge_IRQ);
break;
}
set_irq_flags(irq, IRQF_VALID);
}
void __init ixp23xx_init_irq(void)
{
int irq;
/* Route everything to IRQ */
*IXP23XX_INTR_SEL1 = 0x0;
*IXP23XX_INTR_SEL2 = 0x0;
*IXP23XX_INTR_SEL3 = 0x0;
*IXP23XX_INTR_SEL4 = 0x0;
/* Mask all sources */
*IXP23XX_INTR_EN1 = 0x0;
*IXP23XX_INTR_EN2 = 0x0;
*IXP23XX_INTR_EN3 = 0x0;
*IXP23XX_INTR_EN4 = 0x0;
/*
* Configure all IRQs for level-sensitive operation
*/
for (irq = 0; irq <= NUM_IXP23XX_RAW_IRQS; irq++) {
ixp23xx_config_irq(irq, IXP23XX_IRQ_LEVEL);
}
for (irq = IRQ_IXP23XX_INTA; irq <= IRQ_IXP23XX_INTB; irq++) {
set_irq_chip(irq, &ixp23xx_pci_irq_chip);
set_irq_handler(irq, do_level_IRQ);
set_irq_flags(irq, IRQF_VALID);
}
set_irq_chained_handler(IRQ_IXP23XX_PCI_INT_RPH, pci_handler);
}
/*************************************************************************
* Timer-tick functions for IXP23xx
*************************************************************************/
#define CLOCK_TICKS_PER_USEC (CLOCK_TICK_RATE / USEC_PER_SEC)
static unsigned long next_jiffy_time;
static unsigned long
ixp23xx_gettimeoffset(void)
{
unsigned long elapsed;
elapsed = *IXP23XX_TIMER_CONT - (next_jiffy_time - LATCH);
return elapsed / CLOCK_TICKS_PER_USEC;
}
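/*
 * Worked example (illustrative, assuming the usual HZ-derived LATCH):
 * if the free-running counter reads next_jiffy_time - LATCH + 500,
 * then elapsed = 500 ticks and the reported offset is
 * 500 / CLOCK_TICKS_PER_USEC microseconds past the last jiffy.
 */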
static irqreturn_t
ixp23xx_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
/* Clear Pending Interrupt by writing '1' to it */
*IXP23XX_TIMER_STATUS = IXP23XX_TIMER1_INT_PEND;
while ((*IXP23XX_TIMER_CONT - next_jiffy_time) > LATCH) {
timer_tick(regs);
next_jiffy_time += LATCH;
}
return IRQ_HANDLED;
}
static struct irqaction ixp23xx_timer_irq = {
.name = "IXP23xx Timer Tick",
.handler = ixp23xx_timer_interrupt,
.flags = SA_INTERRUPT | SA_TIMER,
};
void __init ixp23xx_init_timer(void)
{
/* Clear Pending Interrupt by writing '1' to it */
*IXP23XX_TIMER_STATUS = IXP23XX_TIMER1_INT_PEND;
/* Setup the Timer counter value */
*IXP23XX_TIMER1_RELOAD =
(LATCH & ~IXP23XX_TIMER_RELOAD_MASK) | IXP23XX_TIMER_ENABLE;
*IXP23XX_TIMER_CONT = 0;
next_jiffy_time = LATCH;
/* Connect the interrupt handler and enable the interrupt */
setup_irq(IRQ_IXP23XX_TIMER1, &ixp23xx_timer_irq);
}
struct sys_timer ixp23xx_timer = {
.init = ixp23xx_init_timer,
.offset = ixp23xx_gettimeoffset,
};
/*************************************************************************
* IXP23xx Platform Initialization
*************************************************************************/
static struct resource ixp23xx_uart_resources[] = {
{
.start = IXP23XX_UART1_PHYS,
.end = IXP23XX_UART1_PHYS + 0x0fff,
.flags = IORESOURCE_MEM
}, {
.start = IXP23XX_UART2_PHYS,
.end = IXP23XX_UART2_PHYS + 0x0fff,
.flags = IORESOURCE_MEM
}
};
static struct plat_serial8250_port ixp23xx_uart_data[] = {
{
.mapbase = IXP23XX_UART1_PHYS,
.membase = (char *)(IXP23XX_UART1_VIRT + 3),
.irq = IRQ_IXP23XX_UART1,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = IXP23XX_UART_XTAL,
}, {
.mapbase = IXP23XX_UART2_PHYS,
.membase = (char *)(IXP23XX_UART2_VIRT + 3),
.irq = IRQ_IXP23XX_UART2,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
.iotype = UPIO_MEM,
.regshift = 2,
.uartclk = IXP23XX_UART_XTAL,
},
{ },
};
static struct platform_device ixp23xx_uart = {
.name = "serial8250",
.id = 0,
.dev.platform_data = ixp23xx_uart_data,
.num_resources = 2,
.resource = ixp23xx_uart_resources,
};
static struct platform_device *ixp23xx_devices[] __initdata = {
&ixp23xx_uart,
};
void __init ixp23xx_sys_init(void)
{
platform_add_devices(ixp23xx_devices, ARRAY_SIZE(ixp23xx_devices));
}

View File

@ -0,0 +1,69 @@
/*
* arch/arm/mach-ixp23xx/espresso.c
*
* Double Espresso-specific routines
*
* Author: Lennert Buytenhek <buytenh@wantstofly.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/mtd/physmap.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/pci.h>
static void __init espresso_init(void)
{
physmap_configure(0x90000000, 0x02000000, 2, NULL);
/*
* Mark flash as writeable.
*/
IXP23XX_EXP_CS0[0] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[1] |= IXP23XX_FLASH_WRITABLE;
ixp23xx_sys_init();
}
MACHINE_START(ESPRESSO, "IP Fabrics Double Espresso")
/* Maintainer: Lennert Buytenhek */
.phys_io = IXP23XX_PERIPHERAL_PHYS,
.io_pg_offst = ((IXP23XX_PERIPHERAL_VIRT >> 18)) & 0xfffc,
.map_io = ixp23xx_map_io,
.init_irq = ixp23xx_init_irq,
.timer = &ixp23xx_timer,
.boot_params = 0x00000100,
.init_machine = espresso_init,
MACHINE_END

View File

@ -0,0 +1,325 @@
/*
* arch/arm/mach-ixp23xx/ixdp2351.c
*
* IXDP2351 board-specific routines
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Based on 2.4 code Copyright 2004 (c) Intel Corporation
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/mtd/physmap.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/pci.h>
/*
* IXDP2351 Interrupt Handling
*/
static void ixdp2351_inta_mask(unsigned int irq)
{
*IXDP2351_CPLD_INTA_MASK_SET_REG = IXDP2351_INTA_IRQ_MASK(irq);
}
static void ixdp2351_inta_unmask(unsigned int irq)
{
*IXDP2351_CPLD_INTA_MASK_CLR_REG = IXDP2351_INTA_IRQ_MASK(irq);
}
static void ixdp2351_inta_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
u16 ex_interrupt =
*IXDP2351_CPLD_INTA_STAT_REG & IXDP2351_INTA_IRQ_VALID;
int i;
desc->chip->mask(irq);
for (i = 0; i < IXDP2351_INTA_IRQ_NUM; i++) {
if (ex_interrupt & (1 << i)) {
struct irqdesc *cpld_desc;
int cpld_irq =
IXP23XX_MACH_IRQ(IXDP2351_INTA_IRQ_BASE + i);
cpld_desc = irq_desc + cpld_irq;
cpld_desc->handle(cpld_irq, cpld_desc, regs);
}
}
desc->chip->unmask(irq);
}
static struct irqchip ixdp2351_inta_chip = {
.ack = ixdp2351_inta_mask,
.mask = ixdp2351_inta_mask,
.unmask = ixdp2351_inta_unmask
};
static void ixdp2351_intb_mask(unsigned int irq)
{
*IXDP2351_CPLD_INTB_MASK_SET_REG = IXDP2351_INTB_IRQ_MASK(irq);
}
static void ixdp2351_intb_unmask(unsigned int irq)
{
*IXDP2351_CPLD_INTB_MASK_CLR_REG = IXDP2351_INTB_IRQ_MASK(irq);
}
static void ixdp2351_intb_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
u16 ex_interrupt =
*IXDP2351_CPLD_INTB_STAT_REG & IXDP2351_INTB_IRQ_VALID;
int i;
desc->chip->ack(irq);
for (i = 0; i < IXDP2351_INTB_IRQ_NUM; i++) {
if (ex_interrupt & (1 << i)) {
struct irqdesc *cpld_desc;
int cpld_irq =
IXP23XX_MACH_IRQ(IXDP2351_INTB_IRQ_BASE + i);
cpld_desc = irq_desc + cpld_irq;
cpld_desc->handle(cpld_irq, cpld_desc, regs);
}
}
desc->chip->unmask(irq);
}
static struct irqchip ixdp2351_intb_chip = {
.ack = ixdp2351_intb_mask,
.mask = ixdp2351_intb_mask,
.unmask = ixdp2351_intb_unmask
};
void ixdp2351_init_irq(void)
{
int irq;
/* Mask all interrupts from CPLD, disable simulation */
*IXDP2351_CPLD_INTA_MASK_SET_REG = (u16) -1;
*IXDP2351_CPLD_INTB_MASK_SET_REG = (u16) -1;
*IXDP2351_CPLD_INTA_SIM_REG = 0;
*IXDP2351_CPLD_INTB_SIM_REG = 0;
ixp23xx_init_irq();
for (irq = IXP23XX_MACH_IRQ(IXDP2351_INTA_IRQ_BASE);
irq <
IXP23XX_MACH_IRQ(IXDP2351_INTA_IRQ_BASE + IXDP2351_INTA_IRQ_NUM);
irq++) {
if (IXDP2351_INTA_IRQ_MASK(irq) & IXDP2351_INTA_IRQ_VALID) {
set_irq_flags(irq, IRQF_VALID);
set_irq_handler(irq, do_level_IRQ);
set_irq_chip(irq, &ixdp2351_inta_chip);
}
}
for (irq = IXP23XX_MACH_IRQ(IXDP2351_INTB_IRQ_BASE);
irq <
IXP23XX_MACH_IRQ(IXDP2351_INTB_IRQ_BASE + IXDP2351_INTB_IRQ_NUM);
irq++) {
if (IXDP2351_INTB_IRQ_MASK(irq) & IXDP2351_INTB_IRQ_VALID) {
set_irq_flags(irq, IRQF_VALID);
set_irq_handler(irq, do_level_IRQ);
set_irq_chip(irq, &ixdp2351_intb_chip);
}
}
set_irq_chained_handler(IRQ_IXP23XX_INTA, &ixdp2351_inta_handler);
set_irq_chained_handler(IRQ_IXP23XX_INTB, &ixdp2351_intb_handler);
}
/*
* IXDP2351 PCI
*/
/*
* This board does not do normal PCI IRQ routing, or any
* sort of swizzling, so we just need to check where on the
* bus the device is and figure out what CPLD pin it is
* being routed to.
*/
#define DEVPIN(dev, pin) ((pin) | ((dev) << 3))
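/*
 * Illustrative example: DEVPIN(1, 2) encodes to 2 | (1 << 3) = 0x0a,
 * which is matched below for the on-board 82546 channel 1 (device 1,
 * INTB#). The encoding simply packs the pin number into the low three
 * bits and the device number into the bits above it.
 */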
static int __init ixdp2351_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
u8 bus = dev->bus->number;
u32 devpin = DEVPIN(PCI_SLOT(dev->devfn), pin);
struct pci_bus *tmp_bus = dev->bus;
/* Primary bus, no interrupts here */
if (!bus)
return -1;
/* Lookup first leaf in bus tree */
while ((tmp_bus->parent != NULL) && (tmp_bus->parent->parent != NULL))
tmp_bus = tmp_bus->parent;
/* Select between known bridges */
switch (tmp_bus->self->devfn | (tmp_bus->self->bus->number << 8)) {
/* Device is located after first bridge */
case 0x0008:
if (tmp_bus == dev->bus) {
/* Device is located directly after first bridge */
switch (devpin) {
/* Onboard 82546 */
case DEVPIN(1, 1): /* Onboard 82546 ch 0 */
return IRQ_IXDP2351_INTA_82546;
case DEVPIN(1, 2): /* Onboard 82546 ch 1 */
return IRQ_IXDP2351_INTB_82546;
/* PMC SLOT */
case DEVPIN(0, 1): /* PMCP INTA# */
case DEVPIN(2, 4): /* PMCS INTD# */
return IRQ_IXDP2351_SPCI_PMC_INTA;
case DEVPIN(0, 2): /* PMCP INTB# */
case DEVPIN(2, 1): /* PMCS INTA# */
return IRQ_IXDP2351_SPCI_PMC_INTB;
case DEVPIN(0, 3): /* PMCP INTC# */
case DEVPIN(2, 2): /* PMCS INTB# */
return IRQ_IXDP2351_SPCI_PMC_INTC;
case DEVPIN(0, 4): /* PMCP INTD# */
case DEVPIN(2, 3): /* PMCS INTC# */
return IRQ_IXDP2351_SPCI_PMC_INTD;
}
} else {
/* Device is located indirectly after first bridge */
/* Not supported now */
return -1;
}
break;
case 0x0010:
if (tmp_bus == dev->bus) {
/* Device is located directly after second bridge */
/* Secondary bus of second bridge */
switch (devpin) {
case DEVPIN(0, 1): /* DB#0 */
case DEVPIN(0, 2):
case DEVPIN(0, 3):
case DEVPIN(0, 4):
return IRQ_IXDP2351_SPCI_DB_0;
case DEVPIN(1, 1): /* DB#1 */
case DEVPIN(1, 2):
case DEVPIN(1, 3):
case DEVPIN(1, 4):
return IRQ_IXDP2351_SPCI_DB_1;
case DEVPIN(2, 1): /* FIC1 */
case DEVPIN(2, 2):
case DEVPIN(2, 3):
case DEVPIN(2, 4):
case DEVPIN(3, 1): /* FIC2 */
case DEVPIN(3, 2):
case DEVPIN(3, 3):
case DEVPIN(3, 4):
return IRQ_IXDP2351_SPCI_FIC;
}
} else {
/* Device is located indirectly after second bridge */
/* Not supported now */
return -1;
}
break;
}
return -1;
}
struct hw_pci ixdp2351_pci __initdata = {
.nr_controllers = 1,
.preinit = ixp23xx_pci_preinit,
.setup = ixp23xx_pci_setup,
.scan = ixp23xx_pci_scan_bus,
.map_irq = ixdp2351_map_irq,
};
int __init ixdp2351_pci_init(void)
{
if (machine_is_ixdp2351())
pci_common_init(&ixdp2351_pci);
return 0;
}
subsys_initcall(ixdp2351_pci_init);
/*
* IXDP2351 Static Mapped I/O
*/
static struct map_desc ixdp2351_io_desc[] __initdata = {
{
.virtual = IXDP2351_NP_VIRT_BASE,
.pfn = __phys_to_pfn((u64)IXDP2351_NP_PHYS_BASE),
.length = IXDP2351_NP_PHYS_SIZE,
.type = MT_DEVICE
}, {
.virtual = IXDP2351_BB_BASE_VIRT,
.pfn = __phys_to_pfn((u64)IXDP2351_BB_BASE_PHYS),
.length = IXDP2351_BB_SIZE,
.type = MT_DEVICE
}
};
static void __init ixdp2351_map_io(void)
{
ixp23xx_map_io();
iotable_init(ixdp2351_io_desc, ARRAY_SIZE(ixdp2351_io_desc));
}
static void __init ixdp2351_init(void)
{
physmap_configure(0x90000000, 0x04000000, 1, NULL);
/*
* Mark flash as writeable
*/
IXP23XX_EXP_CS0[0] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[1] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[2] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[3] |= IXP23XX_FLASH_WRITABLE;
ixp23xx_sys_init();
}
MACHINE_START(IXDP2351, "Intel IXDP2351 Development Platform")
/* Maintainer: MontaVista Software, Inc. */
.phys_io = IXP23XX_PERIPHERAL_PHYS,
.io_pg_offst = ((IXP23XX_PERIPHERAL_VIRT >> 18)) & 0xfffc,
.map_io = ixdp2351_map_io,
.init_irq = ixdp2351_init_irq,
.timer = &ixp23xx_timer,
.boot_params = 0x00000100,
.init_machine = ixdp2351_init,
MACHINE_END

View File

@ -0,0 +1,275 @@
/*
* arch/arm/mach-ixp23xx/pci.c
*
* PCI routines for IXP23XX based systems
*
* Copyright (c) 2005 MontaVista Software, Inc.
*
* based on original code:
*
* Author: Naeem Afzal <naeem.m.afzal@intel.com>
* Copyright 2002-2005 Intel Corp.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
#include <asm/hardware.h>
extern int (*external_fault) (unsigned long, struct pt_regs *);
static int pci_master_aborts = 0;
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
int clear_master_aborts(void);
static u32
*ixp23xx_pci_config_addr(unsigned int bus_nr, unsigned int devfn, int where)
{
u32 *paddress;
/*
* Must be dword aligned
*/
where &= ~3;
/*
* For top bus, generate type 0, else type 1
*/
if (!bus_nr) {
if (PCI_SLOT(devfn) >= 8)
return 0;
paddress = (u32 *) (IXP23XX_PCI_CFG0_VIRT
| (1 << (PCI_SLOT(devfn) + 16))
| (PCI_FUNC(devfn) << 8) | where);
} else {
paddress = (u32 *) (IXP23XX_PCI_CFG1_VIRT
| (bus_nr << 16)
| (PCI_SLOT(devfn) << 11)
| (PCI_FUNC(devfn) << 8) | where);
}
return paddress;
}
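/*
 * Worked example (illustrative only): a type 0 cycle to bus 0,
 * device 2, function 0, register 0x10 yields
 * IXP23XX_PCI_CFG0_VIRT | (1 << 18) | (0 << 8) | 0x10 -- one address
 * bit per IDSEL line, which is why only slots 0-7 are reachable on
 * the top bus.
 */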
/*
* Mask table, bits to mask for quantity of size 1, 2 or 4 bytes.
* 0 and 3 are not valid indexes...
*/
static u32 bytemask[] = {
/*0*/ 0,
/*1*/ 0xff,
/*2*/ 0xffff,
/*3*/ 0,
/*4*/ 0xffffffff,
};
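/*
 * Illustrative example: for a 2-byte read at where = 2,
 * ixp23xx_pci_read_config() below computes n = 2 and extracts
 * (*addr >> 16) & bytemask[2], i.e. the upper half of the aligned
 * dword.
 */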
static int ixp23xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *value)
{
u32 n;
u32 *addr;
n = where % 4;
DBG("In config_read(%d) %d from dev %d:%d:%d\n", size, where,
bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
addr = ixp23xx_pci_config_addr(bus->number, devfn, where);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
pci_master_aborts = 0;
*value = (*addr >> (8*n)) & bytemask[size];
if (pci_master_aborts) {
pci_master_aborts = 0;
*value = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
return PCIBIOS_SUCCESSFUL;
}
/*
* We don't do error checking on the address for writes.
* It's assumed that the caller has already verified that the device
* exists by doing a read first.
*/
static int ixp23xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
u32 mask;
u32 *addr;
u32 temp;
mask = ~(bytemask[size] << ((where % 0x4) * 8));
addr = ixp23xx_pci_config_addr(bus->number, devfn, where);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
temp = (u32) (value) << ((where % 0x4) * 8);
*addr = (*addr & mask) | temp;
clear_master_aborts();
return PCIBIOS_SUCCESSFUL;
}
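/*
 * Illustrative example of the read-modify-write above: a 1-byte write
 * of 0xab at where = 1 uses mask = ~(0xff << 8) = 0xffff00ff and
 * temp = 0xab << 8 = 0x0000ab00, so only bits 15:8 of the dword
 * change.
 */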
struct pci_ops ixp23xx_pci_ops = {
.read = ixp23xx_pci_read_config,
.write = ixp23xx_pci_write_config,
};
struct pci_bus *ixp23xx_pci_scan_bus(int nr, struct pci_sys_data *sysdata)
{
return pci_scan_bus(sysdata->busnr, &ixp23xx_pci_ops, sysdata);
}
int ixp23xx_pci_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
volatile unsigned long temp;
unsigned long flags;
pci_master_aborts = 1;
local_irq_save(flags);
temp = *IXP23XX_PCI_CONTROL;
/*
* master abort and cmd tgt err
*/
if (temp & ((1 << 8) | (1 << 5)))
*IXP23XX_PCI_CONTROL = temp;
temp = *IXP23XX_PCI_CMDSTAT;
if (temp & (1 << 29))
*IXP23XX_PCI_CMDSTAT = temp;
local_irq_restore(flags);
/*
* If it was an imprecise abort, then we need to correct the
* return address to be _after_ the instruction.
*/
if (fsr & (1 << 10))
regs->ARM_pc += 4;
return 0;
}
int clear_master_aborts(void)
{
volatile u32 temp;
temp = *IXP23XX_PCI_CONTROL;
/*
* master abort and cmd tgt err
*/
if (temp & ((1 << 8) | (1 << 5)))
*IXP23XX_PCI_CONTROL = temp;
temp = *IXP23XX_PCI_CMDSTAT;
if (temp & (1 << 29))
*IXP23XX_PCI_CMDSTAT = temp;
return 0;
}
void __init ixp23xx_pci_preinit(void)
{
#ifdef __ARMEB__
*IXP23XX_PCI_CONTROL |= 0x20000; /* set I/O swapping */
#endif
/*
* ADDR_31 needs to be clear for PCI memory access to CPP memory
*/
*IXP23XX_CPP2XSI_CURR_XFER_REG3 &= ~IXP23XX_CPP2XSI_ADDR_31;
*IXP23XX_CPP2XSI_CURR_XFER_REG3 |= IXP23XX_CPP2XSI_PSH_OFF;
/*
* Select correct memory for PCI inbound transactions
*/
if (ixp23xx_cpp_boot()) {
*IXP23XX_PCI_CPP_ADDR_BITS &= ~(1 << 1);
} else {
*IXP23XX_PCI_CPP_ADDR_BITS |= (1 << 1);
}
hook_fault_code(16+6, ixp23xx_pci_abort_handler, SIGBUS,
"PCI config cycle to non-existent device");
*IXP23XX_PCI_ADDR_EXT = 0x0000e000;
}
/*
* Prevent PCI layer from seeing the inbound host-bridge resources
*/
static void __devinit pci_fixup_ixp23xx(struct pci_dev *dev)
{
int i;
dev->class &= 0xff;
dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
dev->resource[i].start = 0;
dev->resource[i].end = 0;
dev->resource[i].flags = 0;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9002, pci_fixup_ixp23xx);
/*
* IXP2300 systems often have large resource requirements, so we just
* use our own resource space.
*/
static struct resource ixp23xx_pci_mem_space = {
.start = IXP23XX_PCI_MEM_START,
.end = IXP23XX_PCI_MEM_START + IXP23XX_PCI_MEM_SIZE - 1,
.flags = IORESOURCE_MEM,
.name = "PCI Mem Space"
};
static struct resource ixp23xx_pci_io_space = {
.start = 0x00000100,
.end = 0x01ffffff,
.flags = IORESOURCE_IO,
.name = "PCI I/O Space"
};
int ixp23xx_pci_setup(int nr, struct pci_sys_data *sys)
{
if (nr >= 1)
return 0;
sys->resource[0] = &ixp23xx_pci_io_space;
sys->resource[1] = &ixp23xx_pci_mem_space;
sys->resource[2] = NULL;
return 1;
}

View File

@ -0,0 +1,164 @@
/*
* arch/arm/mach-ixp23xx/roadrunner.c
*
* RoadRunner board-specific routines
*
* Author: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright 2005 (c) MontaVista Software, Inc.
*
* Based on 2.4 code Copyright 2005 (c) ADI Engineering Corporation
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/mtd/physmap.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/pci.h>
/*
* Interrupt mapping
*/
#define INTA IRQ_ROADRUNNER_PCI_INTA
#define INTB IRQ_ROADRUNNER_PCI_INTB
#define INTC IRQ_ROADRUNNER_PCI_INTC
#define INTD IRQ_ROADRUNNER_PCI_INTD
#define INTC_PIN IXP23XX_GPIO_PIN_11
#define INTD_PIN IXP23XX_GPIO_PIN_12
static int __init roadrunner_map_irq(struct pci_dev *dev, u8 idsel, u8 pin)
{
static int pci_card_slot_irq[] = {INTB, INTC, INTD, INTA};
static int pmc_card_slot_irq[] = {INTA, INTB, INTC, INTD};
static int usb_irq[] = {INTB, INTC, INTD, -1};
static int mini_pci_1_irq[] = {INTB, INTC, -1, -1};
static int mini_pci_2_irq[] = {INTC, INTD, -1, -1};
switch(dev->bus->number) {
case 0:
switch(dev->devfn) {
case 0x0: // PCI-PCI bridge
break;
case 0x8: // PCI Card Slot
return pci_card_slot_irq[pin - 1];
case 0x10: // PMC Slot
return pmc_card_slot_irq[pin - 1];
case 0x18: // PMC Slot Secondary Agent
break;
case 0x20: // IXP Processor
break;
default:
return NO_IRQ;
}
break;
case 1:
switch(dev->devfn) {
case 0x0: // IDE Controller
return (pin == 1) ? INTC : -1;
case 0x8: // USB fun 0
case 0x9: // USB fun 1
case 0xa: // USB fun 2
return usb_irq[pin - 1];
case 0x10: // Mini PCI 1
return mini_pci_1_irq[pin-1];
case 0x18: // Mini PCI 2
return mini_pci_2_irq[pin-1];
case 0x20: // MEM slot
return (pin == 1) ? INTA : -1;
default:
return NO_IRQ;
}
break;
default:
return NO_IRQ;
}
return NO_IRQ;
}
static void roadrunner_pci_preinit(void)
{
set_irq_type(IRQ_ROADRUNNER_PCI_INTC, IRQT_LOW);
set_irq_type(IRQ_ROADRUNNER_PCI_INTD, IRQT_LOW);
ixp23xx_pci_preinit();
}
static struct hw_pci roadrunner_pci __initdata = {
.nr_controllers = 1,
.preinit = roadrunner_pci_preinit,
.setup = ixp23xx_pci_setup,
.scan = ixp23xx_pci_scan_bus,
.map_irq = roadrunner_map_irq,
};
static int __init roadrunner_pci_init(void)
{
if (machine_is_roadrunner())
pci_common_init(&roadrunner_pci);
return 0;
}
subsys_initcall(roadrunner_pci_init);
static void __init roadrunner_init(void)
{
physmap_configure(0x90000000, 0x04000000, 2, NULL);
/*
* Mark flash as writeable
*/
IXP23XX_EXP_CS0[0] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[1] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[2] |= IXP23XX_FLASH_WRITABLE;
IXP23XX_EXP_CS0[3] |= IXP23XX_FLASH_WRITABLE;
ixp23xx_sys_init();
}
MACHINE_START(ROADRUNNER, "ADI Engineering RoadRunner Development Platform")
/* Maintainer: Deepak Saxena */
.phys_io = IXP23XX_PERIPHERAL_PHYS,
.io_pg_offst = ((IXP23XX_PERIPHERAL_VIRT >> 18)) & 0xfffc,
.map_io = ixp23xx_map_io,
.init_irq = ixp23xx_init_irq,
.timer = &ixp23xx_timer,
.boot_params = 0x00000100,
.init_machine = roadrunner_init,
MACHINE_END

View File

@ -141,7 +141,7 @@ static int __init netstar_late_init(void)
/* TODO: Setup front panel switch here */
/* Setup panic notifier */
notifier_chain_register(&panic_notifier_list, &panic_block);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}

View File

@ -235,7 +235,7 @@ static struct notifier_block panic_block = {
static int __init voiceblue_setup(void)
{
/* Setup panic notifier */
notifier_chain_register(&panic_notifier_list, &panic_block);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
return 0;
}

View File

@ -10,6 +10,11 @@ config ARCH_LUBBOCK
select PXA25x
select SA1111
config MACH_LOGICPD_PXA270
bool "LogicPD PXA270 Card Engine Development Platform"
select PXA27x
select IWMMXT
config MACH_MAINSTONE
bool "Intel HCDDBBVA0 Development Platform"
select PXA27x

View File

@ -9,6 +9,7 @@ obj-$(CONFIG_PXA27x) += pxa27x.o
# Specific board support
obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
obj-$(CONFIG_MACH_LOGICPD_PXA270) += lpd270.o
obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o sharpsl_pm.o corgi_pm.o

View File

@ -319,6 +319,11 @@ void __init pxa_set_ficp_info(struct pxaficp_platform_data *info)
pxaficp_device.dev.platform_data = info;
}
static struct platform_device pxartc_device = {
.name = "sa1100-rtc",
.id = -1,
};
static struct platform_device *devices[] __initdata = {
&pxamci_device,
&udc_device,
@ -329,6 +334,7 @@ static struct platform_device *devices[] __initdata = {
&pxaficp_device,
&i2c_device,
&i2s_device,
&pxartc_device,
};
static int __init pxa_init(void)

View File

@ -0,0 +1,393 @@
/*
* linux/arch/arm/mach-pxa/lpd270.c
*
* Support for the LogicPD PXA270 Card Engine.
* Derived from the mainstone code, which carries these notices:
*
* Author: Nicolas Pitre
* Created: Nov 05, 2002
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sysdev.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <asm/hardware.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/flash.h>
#include <asm/arch/pxa-regs.h>
#include <asm/arch/lpd270.h>
#include <asm/arch/audio.h>
#include <asm/arch/pxafb.h>
#include <asm/arch/mmc.h>
#include <asm/arch/irda.h>
#include <asm/arch/ohci.h>
#include "generic.h"
static unsigned int lpd270_irq_enabled;
static void lpd270_mask_irq(unsigned int irq)
{
int lpd270_irq = irq - LPD270_IRQ(0);
__raw_writew(~(1 << lpd270_irq), LPD270_INT_STATUS);
lpd270_irq_enabled &= ~(1 << lpd270_irq);
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
static void lpd270_unmask_irq(unsigned int irq)
{
int lpd270_irq = irq - LPD270_IRQ(0);
lpd270_irq_enabled |= 1 << lpd270_irq;
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
}
static struct irqchip lpd270_irq_chip = {
.ack = lpd270_mask_irq,
.mask = lpd270_mask_irq,
.unmask = lpd270_unmask_irq,
};
static void lpd270_irq_handler(unsigned int irq, struct irqdesc *desc,
struct pt_regs *regs)
{
unsigned long pending;
pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled;
do {
GEDR(0) = GPIO_bit(0); /* clear useless edge notification */
if (likely(pending)) {
irq = LPD270_IRQ(0) + __ffs(pending);
desc = irq_desc + irq;
desc_handle_irq(irq, desc, regs);
pending = __raw_readw(LPD270_INT_STATUS) &
lpd270_irq_enabled;
}
} while (pending);
}
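/*
 * Demux example (illustrative): if the CPLD status register reads
 * 0x0004 and bit 2 is unmasked, __ffs(0x0004) = 2, so the handler
 * dispatches LPD270_IRQ(0) + 2 and then re-reads the status until no
 * enabled bits remain pending.
 */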
static void __init lpd270_init_irq(void)
{
int irq;
pxa_init_irq();
__raw_writew(0, LPD270_INT_MASK);
__raw_writew(0, LPD270_INT_STATUS);
/* setup extra LogicPD PXA270 irqs */
for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) {
set_irq_chip(irq, &lpd270_irq_chip);
set_irq_handler(irq, do_level_IRQ);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
set_irq_chained_handler(IRQ_GPIO(0), lpd270_irq_handler);
set_irq_type(IRQ_GPIO(0), IRQT_FALLING);
}
#ifdef CONFIG_PM
static int lpd270_irq_resume(struct sys_device *dev)
{
__raw_writew(lpd270_irq_enabled, LPD270_INT_MASK);
return 0;
}
static struct sysdev_class lpd270_irq_sysclass = {
set_kset_name("cpld_irq"),
.resume = lpd270_irq_resume,
};
static struct sys_device lpd270_irq_device = {
.cls = &lpd270_irq_sysclass,
};
static int __init lpd270_irq_device_init(void)
{
int ret = sysdev_class_register(&lpd270_irq_sysclass);
if (ret == 0)
ret = sysdev_register(&lpd270_irq_device);
return ret;
}
device_initcall(lpd270_irq_device_init);
#endif
static struct resource smc91x_resources[] = {
[0] = {
.start = LPD270_ETH_PHYS,
.end = (LPD270_ETH_PHYS + 0xfffff),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = LPD270_ETHERNET_IRQ,
.end = LPD270_ETHERNET_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
};
static struct platform_device lpd270_audio_device = {
.name = "pxa2xx-ac97",
.id = -1,
};
static struct resource lpd270_flash_resources[] = {
[0] = {
.start = PXA_CS0_PHYS,
.end = PXA_CS0_PHYS + SZ_64M - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PXA_CS1_PHYS,
.end = PXA_CS1_PHYS + SZ_64M - 1,
.flags = IORESOURCE_MEM,
},
};
static struct mtd_partition lpd270_flash0_partitions[] = {
{
.name = "Bootloader",
.size = 0x00040000,
.offset = 0,
.mask_flags = MTD_WRITEABLE /* force read-only */
}, {
.name = "Kernel",
.size = 0x00400000,
.offset = 0x00040000,
}, {
.name = "Filesystem",
.size = MTDPART_SIZ_FULL,
.offset = 0x00440000
},
};
static struct flash_platform_data lpd270_flash_data[2] = {
{
.name = "processor-flash",
.map_name = "cfi_probe",
.parts = lpd270_flash0_partitions,
.nr_parts = ARRAY_SIZE(lpd270_flash0_partitions),
}, {
.name = "mainboard-flash",
.map_name = "cfi_probe",
.parts = NULL,
.nr_parts = 0,
}
};
static struct platform_device lpd270_flash_device[2] = {
{
.name = "pxa2xx-flash",
.id = 0,
.dev = {
.platform_data = &lpd270_flash_data[0],
},
.resource = &lpd270_flash_resources[0],
.num_resources = 1,
}, {
.name = "pxa2xx-flash",
.id = 1,
.dev = {
.platform_data = &lpd270_flash_data[1],
},
.resource = &lpd270_flash_resources[1],
.num_resources = 1,
},
};
static void lpd270_backlight_power(int on)
{
if (on) {
pxa_gpio_mode(GPIO16_PWM0_MD);
pxa_set_cken(CKEN0_PWM0, 1);
PWM_CTRL0 = 0;
PWM_PWDUTY0 = 0x3ff;
PWM_PERVAL0 = 0x3ff;
} else {
PWM_CTRL0 = 0;
PWM_PWDUTY0 = 0x0;
PWM_PERVAL0 = 0x3FF;
pxa_set_cken(CKEN0_PWM0, 0);
}
}
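/*
 * Editorial note (assuming the usual PXA PWM behaviour where
 * duty == period keeps the output fully on): PWM_PWDUTY0 == PWM_PERVAL0
 * == 0x3ff corresponds to full backlight brightness; halving
 * PWM_PWDUTY0 to 0x1ff would give roughly 50% duty.
 */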
/* 5.7" TFT QVGA (LoLo display number 1) */
static struct pxafb_mach_info sharp_lq057q3dc02 __initdata = {
.pixclock = 100000,
.xres = 240,
.yres = 320,
.bpp = 16,
.hsync_len = 64,
.left_margin = 0x27,
.right_margin = 0x09,
.vsync_len = 0x04,
.upper_margin = 0x08,
.lower_margin = 0x14,
.sync = 0,
.lccr0 = 0x07800080,
.lccr3 = 0x04400007,
.pxafb_backlight_power = lpd270_backlight_power,
};
/* 6.4" TFT VGA (LoLo display number 5) */
static struct pxafb_mach_info sharp_lq64d343 __initdata = {
.pixclock = 20000,
.xres = 640,
.yres = 480,
.bpp = 16,
.hsync_len = 49,
.left_margin = 0x89,
.right_margin = 0x19,
.vsync_len = 18,
.upper_margin = 0x22,
.lower_margin = 0,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.lccr0 = 0x07800080,
.lccr3 = 0x04400001,
.pxafb_backlight_power = lpd270_backlight_power,
};
/* 3.5" TFT QVGA (LoLo display number 8) */
static struct pxafb_mach_info sharp_lq035q7db02_20 __initdata = {
.pixclock = 100000,
.xres = 240,
.yres = 320,
.bpp = 16,
.hsync_len = 0x34,
.left_margin = 0x09,
.right_margin = 0x09,
.vsync_len = 0x08,
.upper_margin = 0x05,
.lower_margin = 0x14,
.sync = 0,
.lccr0 = 0x07800080,
.lccr3 = 0x04400007,
.pxafb_backlight_power = lpd270_backlight_power,
};
static struct platform_device *platform_devices[] __initdata = {
&smc91x_device,
&lpd270_audio_device,
&lpd270_flash_device[0],
&lpd270_flash_device[1],
};
static int lpd270_ohci_init(struct device *dev)
{
/* set up the Port 1 GPIO pins. */
pxa_gpio_mode(88 | GPIO_ALT_FN_1_IN); /* USBHPWR1 */
pxa_gpio_mode(89 | GPIO_ALT_FN_2_OUT); /* USBHPEN1 */
/* Set the Power Control Polarity Low (PCPL) and Power Sense
Polarity Low (PSPL) bits, i.e. make both signals active low. */
UHCHR = (UHCHR | UHCHR_PCPL | UHCHR_PSPL) &
~(UHCHR_SSEP1 | UHCHR_SSEP2 | UHCHR_SSEP3 | UHCHR_SSE);
return 0;
}
static struct pxaohci_platform_data lpd270_ohci_platform_data = {
.port_mode = PMM_PERPORT_MODE,
.init = lpd270_ohci_init,
};
static void __init lpd270_init(void)
{
lpd270_flash_data[0].width = (BOOT_DEF & 1) ? 2 : 4;
lpd270_flash_data[1].width = 4;
/*
* System bus arbiter setting:
* - Core_Park
* - LCD_wt:DMA_wt:CORE_Wt = 2:3:4
*/
ARB_CNTRL = ARB_CORE_PARK | 0x234;
/*
* On LogicPD PXA270, we route AC97_SYSCLK via GPIO45.
*/
pxa_gpio_mode(GPIO45_SYSCLK_AC97_MD);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
// set_pxa_fb_info(&sharp_lq057q3dc02);
set_pxa_fb_info(&sharp_lq64d343);
// set_pxa_fb_info(&sharp_lq035q7db02_20);
pxa_set_ohci_info(&lpd270_ohci_platform_data);
}
static struct map_desc lpd270_io_desc[] __initdata = {
{
.virtual = LPD270_CPLD_VIRT,
.pfn = __phys_to_pfn(LPD270_CPLD_PHYS),
.length = LPD270_CPLD_SIZE,
.type = MT_DEVICE,
},
};
static void __init lpd270_map_io(void)
{
pxa_map_io();
iotable_init(lpd270_io_desc, ARRAY_SIZE(lpd270_io_desc));
/* initialize sleep mode regs (wake-up sources, etc) */
PGSR0 = 0x00008800;
PGSR1 = 0x00000002;
PGSR2 = 0x0001FC00;
PGSR3 = 0x00001F81;
PWER = 0xC0000002;
PRER = 0x00000002;
PFER = 0x00000002;
/* set up for using internal SRAM as the framebuffer. */
PSLR |= 0x00000F04;
PCFR = 0x00000066;
}
MACHINE_START(LOGICPD_PXA270, "LogicPD PXA270 Card Engine")
/* Maintainer: Peter Barada */
.phys_io = 0x40000000,
.io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
.boot_params = 0xa0000100,
.map_io = lpd270_map_io,
.init_irq = lpd270_init_irq,
.timer = &pxa_timer,
.init_machine = lpd270_init,
MACHINE_END

View File

@ -467,6 +467,8 @@ struct platform_device akitaioexp_device = {
.id = -1,
};
EXPORT_SYMBOL_GPL(akitaioexp_device);
static void __init akita_init(void)
{
spitz_ficp_platform_data.transceiver_mode = akita_irda_transceiver_mode;

View File

@ -111,7 +111,7 @@ config SA1100_LART
bool "LART"
help
Say Y here if you are using the Linux Advanced Radio Terminal
(also known as the LART). See <http://www.lart.tudelft.nl/> for
(also known as the LART). See <http://www.lartmaker.nl/> for
information on the LART.
config SA1100_PLEB

View File

@ -11,7 +11,7 @@
* linux-2.4.5-rmk1
*
* This software has been developed while working on the LART
* computing board (http://www.lart.tudelft.nl/), which is
* computing board (http://www.lartmaker.nl/), which is
* sponsored by the Mobile Multi-media Communications
* (http://www.mmc.tudelft.nl/) and Ubiquitous Communications
* (http://www.ubicom.tudelft.nl/) projects.

View File

@ -324,6 +324,11 @@ void sa11x0_set_irda_data(struct irda_platform_data *irda)
sa11x0ir_device.dev.platform_data = irda;
}
static struct platform_device sa11x0rtc_device = {
.name = "sa1100-rtc",
.id = -1,
};
static struct platform_device *sa11x0_devices[] __initdata = {
&sa11x0udc_device,
&sa11x0uart1_device,
@ -333,6 +338,7 @@ static struct platform_device *sa11x0_devices[] __initdata = {
&sa11x0pcmcia_device,
&sa11x0fb_device,
&sa11x0mtd_device,
&sa11x0rtc_device,
};
static int __init sa1100_init(void)

View File

@ -239,6 +239,17 @@ config CPU_XSCALE
select CPU_CACHE_VIVT
select CPU_TLB_V4WBI
# XScale Core Version 3
config CPU_XSC3
bool
depends on ARCH_IXP23XX
default y
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_CACHE_VIVT
select CPU_TLB_V4WBI
select IO_36
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
@ -361,11 +372,17 @@ config CPU_TLB_V4WBI
config CPU_TLB_V6
bool
#
# CPU supports 36-bit I/O
#
config IO_36
bool
comment "Processor Features"
config ARM_THUMB
bool "Support Thumb user binaries"
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_V6
depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
default y
help
Say Y if you want to include kernel support for running user space

View File

@ -30,6 +30,7 @@ obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o
obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
@ -51,4 +52,5 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
obj-$(CONFIG_CPU_SA110) += proc-sa110.o
obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o
obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o
obj-$(CONFIG_CPU_V6) += proc-v6.o

View File

@ -0,0 +1,97 @@
/*
* linux/arch/arm/lib/copypage-xsc3.S
*
* Copyright (C) 2004 Intel Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Adapted for 3rd gen XScale core, no more mini-dcache
* Author: Matt Gilbert (matthew.m.gilbert@intel.com)
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
/*
* General note:
* We don't really want write-allocate cache behaviour for these functions
* since that will just eat through 8K of the cache.
*/
.text
.align 5
/*
* XSC3 optimised copy_user_page
* r0 = destination
* r1 = source
* r2 = virtual user address of ultimate destination page
*
* The source page may have some clean entries in the cache already, but we
* can safely ignore them - break_cow() will flush them out of the cache
* if we eventually end up using our copied page.
*
*/
ENTRY(xsc3_mc_copy_user_page)
stmfd sp!, {r4, r5, lr}
mov lr, #PAGE_SZ/64-1
pld [r1, #0]
pld [r1, #32]
1: pld [r1, #64]
pld [r1, #96]
2: ldrd r2, [r1], #8
mov ip, r0
ldrd r4, [r1], #8
mcr p15, 0, ip, c7, c6, 1 @ invalidate
strd r2, [r0], #8
ldrd r2, [r1], #8
strd r4, [r0], #8
ldrd r4, [r1], #8
strd r2, [r0], #8
strd r4, [r0], #8
ldrd r2, [r1], #8
mov ip, r0
ldrd r4, [r1], #8
mcr p15, 0, ip, c7, c6, 1 @ invalidate
strd r2, [r0], #8
ldrd r2, [r1], #8
subs lr, lr, #1
strd r4, [r0], #8
ldrd r4, [r1], #8
strd r2, [r0], #8
strd r4, [r0], #8
bgt 1b
beq 2b
ldmfd sp!, {r4, r5, pc}
.align 5
/*
* XScale optimised clear_user_page
* r0 = destination
* r1 = virtual user address of ultimate destination page
*/
ENTRY(xsc3_mc_clear_user_page)
mov r1, #PAGE_SZ/32
mov r2, #0
mov r3, #0
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line
strd r2, [r0], #8
strd r2, [r0], #8
strd r2, [r0], #8
strd r2, [r0], #8
subs r1, r1, #1
bne 1b
mov pc, lr
__INITDATA
.type xsc3_mc_user_fns, #object
ENTRY(xsc3_mc_user_fns)
.long xsc3_mc_clear_user_page
.long xsc3_mc_copy_user_page
.size xsc3_mc_user_fns, . - xsc3_mc_user_fns

View File

@ -557,7 +557,8 @@ void __init create_mapping(struct map_desc *md)
* supersections are only allocated for domain 0 regardless
* of the actual domain assignments in use.
*/
if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
&& domain == 0) {
/*
* Align to supersection boundary if !high pages.
* High pages have already been checked for proper

View File

@ -21,6 +21,14 @@
#define D_CACHE_LINE_SIZE 32
#define TTB_C (1 << 0)
#define TTB_S (1 << 1)
#define TTB_IMP (1 << 2)
#define TTB_RGN_NC (0 << 3)
#define TTB_RGN_WBWA (1 << 3)
#define TTB_RGN_WT (2 << 3)
#define TTB_RGN_WB (3 << 3)
.macro cpsie, flags
.ifc \flags, f
.long 0xf1080040
@ -115,7 +123,7 @@ ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
#ifdef CONFIG_SMP
orr r0, r0, #2 @ set shared pgtable
orr r0, r0, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable
#endif
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
@ -161,8 +169,8 @@ ENTRY(cpu_v6_set_pte)
tst r1, #L_PTE_YOUNG
biceq r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK
@ tst r1, #L_PTE_EXEC
@ orreq r2, r2, #PTE_EXT_XN
tst r1, #L_PTE_EXEC
orreq r2, r2, #PTE_EXT_XN
tst r1, #L_PTE_PRESENT
moveq r2, #0
@ -221,7 +229,7 @@ __v6_setup:
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
#ifdef CONFIG_SMP
orr r4, r4, #2 @ set shared pgtable
orr r4, r4, #TTB_RGN_WBWA|TTB_S @ mark PTWs shared, outer cacheable
#endif
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#ifdef CONFIG_VFP

View File

@ -0,0 +1,498 @@
/*
* linux/arch/arm/mm/proc-xsc3.S
*
* Original Author: Matthew Gilbert
* Current Maintainer: Deepak Saxena <dsaxena@plexity.net>
*
* Copyright 2004 (C) Intel Corp.
* Copyright 2005 (c) MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for the Intel XScale3 Core (XSC3). The XSC3 core is an
* extension to Intel's original XScale core that adds the following
* features:
*
* - ARMv6 Supersections
* - Low Locality Reference pages (replaces mini-cache)
* - 36-bit addressing
* - L2 cache
* - Cache-coherency if chipset supports it
*
* Based on original XScale code by Nicolas Pitre
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* This is the maximum size of an area which will be flushed. If the
* area is larger than this, then we flush the whole cache.
*/
#define MAX_AREA_SIZE 32768
/*
* The cache line size of the I and D cache.
*/
#define CACHELINESIZE 32
/*
* The size of the data cache.
*/
#define CACHESIZE 32768
/*
* Run with L2 enabled.
*/
#define L2_CACHE_ENABLE 1
/*
* Enable the Branch Target Buffer (can cause crashes, see erratum #42.)
*/
#define BTB_ENABLE 0
/*
* This macro is used to wait for a CP15 write and is needed
* when we have to ensure that the last operation to the co-pro
* was completed before continuing with operation.
*/
.macro cpwait_ret, lr, rd
mrc p15, 0, \rd, c2, c0, 0 @ arbitrary read of cp15
sub pc, \lr, \rd, LSR #32 @ wait for completion and
@ flush instruction pipeline
.endm
/*
* This macro cleans & invalidates the entire xsc3 dcache by set & way.
*/
.macro clean_d_cache rd, rs
mov \rd, #0x1f00
orr \rd, \rd, #0x00e0
1: mcr p15, 0, \rd, c7, c14, 2 @ clean/inv set/way
adds \rd, \rd, #0x40000000
bcc 1b
subs \rd, \rd, #0x20
bpl 1b
.endm
.text
/*
* cpu_xsc3_proc_init()
*
* Nothing too exciting at the moment
*/
ENTRY(cpu_xsc3_proc_init)
mov pc, lr
/*
* cpu_xsc3_proc_fin()
*/
ENTRY(cpu_xsc3_proc_fin)
str lr, [sp, #-4]!
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r0
bl xsc3_flush_kern_cache_all @ clean caches
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1800 @ ...IZ...........
bic r0, r0, #0x0006 @ .............CA.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldr pc, [sp], #4
/*
* cpu_xsc3_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x0086 @ ........B....CA.
bic r1, r1, #0x3900 @ ..VIZ..S........
mcr p15, 0, r1, c1, c0, 0 @ ctrl register
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches & BTB
bic r1, r1, #0x0001 @ ...............M
mcr p15, 0, r1, c1, c0, 0 @ ctrl register
@ CAUTION: MMU turned off from this point. We count on the pipeline
@ already containing the last two instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, r0
/*
* cpu_xsc3_do_idle()
*
* Cause the processor to idle
*
* For now we do nothing but go to idle mode for every case
*
* XScale supports clock switching, but using idle mode support
* allows external hardware to react to system state changes.
* MMG: Come back to this one.
*/
.align 5
ENTRY(cpu_xsc3_do_idle)
mov r0, #1
mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
mov pc, lr
/* ================================= CACHE ================================ */
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(xsc3_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(xsc3_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
clean_d_cache r0, r1
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write Buffer
mcrne p15, 0, ip, c7, c5, 4 @ Prefetch Flush
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vma_area_struct describing address space
*/
.align 5
ENTRY(xsc3_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #MAX_AREA_SIZE
bhs __flush_whole_cache
1: tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line
mcr p15, 0, r0, c7, c14, 1 @ Clean/invalidate D cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
mcrne p15, 0, ip, c7, c10, 4 @ Drain Write Buffer
mcrne p15, 0, ip, c7, c5, 4 @ Prefetch Flush
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*
* Note: single I-cache line invalidation isn't used here since
* it also trashes the mini I-cache used by JTAG debuggers.
*/
ENTRY(xsc3_coherent_kern_range)
/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write Buffer
mcr p15, 0, r0, c7, c5, 4 @ Prefetch Flush
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(xsc3_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ Clean/Invalidate D Cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, r0, c7, c10, 4 @ Drain Write Buffer
mcr p15, 0, r0, c7, c5, 4 @ Prefetch Flush
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(xsc3_dma_inv_range)
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D entry
mcrne p15, 1, r0, c7, c11, 1 @ clean L2 D entry
tst r1, #CACHELINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean L1 D entry
mcrne p15, 1, r1, c7, c11, 1 @ clean L2 D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate L1 D entry
mcr p15, 1, r0, c7, c7, 1 @ Invalidate L2 D cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write Buffer
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(xsc3_dma_clean_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D entry
mcr p15, 1, r0, c7, c11, 1 @ clean L2 D entry
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write Buffer
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(xsc3_dma_flush_range)
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ Clean/invalidate L1 D cache line
mcr p15, 1, r0, c7, c11, 1 @ Clean L2 D cache line
mcr p15, 1, r0, c7, c7, 1 @ Invalidate L2 D cache line
add r0, r0, #CACHELINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ Drain Write Buffer
mov pc, lr
ENTRY(xsc3_cache_fns)
.long xsc3_flush_kern_cache_all
.long xsc3_flush_user_cache_all
.long xsc3_flush_user_cache_range
.long xsc3_coherent_kern_range
.long xsc3_coherent_user_range
.long xsc3_flush_kern_dcache_page
.long xsc3_dma_inv_range
.long xsc3_dma_clean_range
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
subs r1, r1, #CACHELINESIZE
bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_xsc3_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_xsc3_switch_mm)
clean_d_cache r1, r2
mcr p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write Buffer
mcr p15, 0, ip, c7, c5, 4 @ Prefetch Flush
#ifdef L2_CACHE_ENABLE
orr r0, r0, #0x18 @ cache the page table in L2
#endif
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
cpwait_ret lr, ip
/*
* cpu_xsc3_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*
*/
.align 5
ENTRY(cpu_xsc3_set_pte)
str r1, [r0], #-2048 @ linux version
bic r2, r1, #0xff0
orr r2, r2, #PTE_TYPE_EXT @ extended page
eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
tst r3, #L_PTE_USER @ User?
orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
@ combined with user -> user r/w
#if L2_CACHE_ENABLE
@ If it's cacheable it needs to be in L2 also.
eor ip, r1, #L_PTE_CACHEABLE
tst ip, #L_PTE_CACHEABLE
orreq r2, r2, #PTE_EXT_TEX(0x5)
#endif
tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0 @ no -> fault
str r2, [r0] @ hardware version
mov ip, #0
mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
mcr p15, 0, ip, c7, c10, 4 @ Drain Write Buffer
mov pc, lr
.ltorg
.align
__INIT
.type __xsc3_setup, #function
__xsc3_setup:
mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write Buffer
mcr p15, 0, ip, c7, c5, 4 @ Prefetch Flush
mcr p15, 0, ip, c8, c7, 0 @ invalidate I, D TLBs
#if L2_CACHE_ENABLE
orr r4, r4, #0x18 @ cache the page table in L2
#endif
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
mov r0, #1 @ Allow access to CP0 and CP13
orr r0, r0, #1 << 13 @ It's undefined whether this
mcr p15, 0, r0, c15, c1, 0 @ affects USR or SVC modes
mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg
and r0, r0, #2 @ preserve the P bit setting
#if L2_CACHE_ENABLE
orr r0, r0, #(1 << 10) @ enable L2 for LLR cache
#endif
mcr p15, 0, r0, c1, c0, 1 @ set auxiliary control reg
mrc p15, 0, r0, c1, c0, 0 @ get control register
bic r0, r0, #0x0200 @ .... ..R. .... ....
bic r0, r0, #0x0002 @ .... .... .... ..A.
orr r0, r0, #0x0005 @ .... .... .... .C.M
#if BTB_ENABLE
orr r0, r0, #0x3900 @ ..VI Z..S .... ....
#else
orr r0, r0, #0x3100 @ ..VI ...S .... ....
#endif
#if L2_CACHE_ENABLE
orr r0, r0, #0x4000000 @ L2 enable
#endif
mov pc, lr
.size __xsc3_setup, . - __xsc3_setup
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
.word v5t_early_abort
.word cpu_xsc3_proc_init
.word cpu_xsc3_proc_fin
.word cpu_xsc3_reset
.word cpu_xsc3_do_idle
.word cpu_xsc3_dcache_clean_area
.word cpu_xsc3_switch_mm
.word cpu_xsc3_set_pte
.size xsc3_processor_functions, . - xsc3_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_xsc3_name, #object
cpu_xsc3_name:
.asciz "XScale-Core3"
.size cpu_xsc3_name, . - cpu_xsc3_name
.align
.section ".proc.info.init", #alloc, #execinstr
.type __xsc3_proc_info,#object
__xsc3_proc_info:
.long 0x69056000
.long 0xffffe000
.long 0x00000c0e
b __xsc3_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
.long cpu_xsc3_name
.long xsc3_processor_functions
.long v4wbi_tlb_fns
.long xsc3_mc_user_fns
.long xsc3_cache_fns
.size __xsc3_proc_info, . - __xsc3_proc_info
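For readers following the access-permission juggling in cpu_xsc3_set_pte above, a rough C transcription of the Linux-to-hardware PTE translation may help. This is a sketch only: the constant values are illustrative stand-ins for the asm/pgtable definitions, and the real routine additionally sets the L2 TEX bit and performs the cache/write-buffer maintenance shown in the assembly.

#define L_PTE_PRESENT	0x01	/* illustrative values; the real ones */
#define L_PTE_YOUNG	0x02	/* live in asm/pgtable.h              */
#define L_PTE_USER	0x10
#define L_PTE_WRITE	0x20
#define L_PTE_DIRTY	0x80
#define PTE_TYPE_EXT		0x03	/* extended small page        */
#define PTE_EXT_AP_UNO_SRW	0x10	/* user no access, system r/w */
#define PTE_EXT_AP_URO_SRW	0x20	/* user r/o, system r/w       */

static unsigned long xsc3_hw_pte(unsigned long pte)
{
	unsigned long hw = (pte & ~0xff0UL) | PTE_TYPE_EXT;

	if (pte & L_PTE_USER)			/* user mapping */
		hw |= PTE_EXT_AP_URO_SRW;
	if ((pte & (L_PTE_WRITE | L_PTE_DIRTY)) == (L_PTE_WRITE | L_PTE_DIRTY))
		hw |= PTE_EXT_AP_UNO_SRW;	/* writable and dirty: allow writes */
	if ((pte & (L_PTE_PRESENT | L_PTE_YOUNG)) != (L_PTE_PRESENT | L_PTE_YOUNG))
		hw = 0;				/* not present or not young: fault */
	return hw;
}

Note how a user page that is also written ends up with both AP fields set, matching the "combined with user -> user r/w" comment in the assembly.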

View File

@ -23,6 +23,7 @@
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>
#include <asm/segment.h>
#include <asm/mach-types.h>
@ -101,12 +102,6 @@ struct node_info {
int bootmap_pages;
};
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x) (PAGE_ALIGN(x) >> PAGE_SHIFT)
#define PFN_SIZE(x) ((x) >> PAGE_SHIFT)
#define PFN_RANGE(s,e) PFN_SIZE(PAGE_ALIGN((unsigned long)(e)) - \
(((unsigned long)(s)) & PAGE_MASK))
/*
* FIXME: We really want to avoid allocating the bootmap bitmap
* over the top of the initrd. Hopefully, this is located towards

View File

@ -18,6 +18,7 @@
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/utsname.h>
#include <linux/pfn.h>
#include <asm/setup.h>
@ -88,10 +89,6 @@ setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
/* min_low_pfn points to the start of DRAM, start_pfn points
* to the first DRAM pages after the kernel, and max_low_pfn
* to the end of DRAM.
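Several hunks in this merge (here, and in the m32r and MIPS files further down) delete per-file copies of the PFN_* helpers in favour of the new <linux/pfn.h> include. For reference, the consolidated definitions, quoted from that era's header (treat as indicative):

#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)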

View File

@ -231,6 +231,15 @@ config SCHED_SMT
cost of slightly increased overhead in some places. If unsure say
N here.
config SCHED_MC
bool "Multi-core scheduler support"
depends on SMP
default y
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
source "kernel/Kconfig.preempt"
config X86_UP_APIC

View File

@ -1924,6 +1924,7 @@ skip10: movb %ah, %al
ret
store_edid:
#ifdef CONFIG_FB_FIRMWARE_EDID
pushw %es # just save all registers
pushw %ax
pushw %bx
@ -1954,6 +1955,7 @@ store_edid:
popw %bx
popw %ax
popw %es
#endif
ret
# VIDEO_SELECT-only variables

View File

@ -266,7 +266,7 @@ static void __init early_cpu_detect(void)
void __cpuinit generic_identify(struct cpuinfo_x86 * c)
{
u32 tfms, xlvl;
int junk;
int ebx;
if (have_cpuid_p()) {
/* Get vendor name */
@ -282,7 +282,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
/* Intel-defined flags: level 0x00000001 */
if ( c->cpuid_level >= 0x00000001 ) {
u32 capability, excap;
cpuid(0x00000001, &tfms, &junk, &excap, &capability);
cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
c->x86_capability[0] = capability;
c->x86_capability[4] = excap;
c->x86 = (tfms >> 8) & 15;
@ -292,6 +292,11 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
c->x86_mask = tfms & 15;
#ifdef CONFIG_SMP
c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
c->apicid = (ebx >> 24) & 0xFF;
#endif
} else {
/* Have CPUID level 0 only - unheard of */
c->x86 = 4;
@ -474,7 +479,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
cpuid(1, &eax, &ebx, &ecx, &edx);
c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
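Both hunks above take c->apicid from bits 31..24 of EBX as returned by CPUID leaf 1, which hold the initial APIC ID of the executing CPU. A minimal user-space illustration of that extraction (a sketch assuming x86 and GCC inline asm, not kernel code):

#include <stdio.h>

static unsigned int initial_apicid(void)
{
	unsigned int eax = 1, ebx, ecx, edx;

	/* CPUID leaf 1: EBX[31:24] = initial APIC ID */
	__asm__ volatile("cpuid"
			 : "+a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx));
	return (ebx >> 24) & 0xFF;
}

int main(void)
{
	printf("initial APIC ID: %u\n", initial_apicid());
	return 0;
}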

View File

@ -1095,10 +1095,15 @@ static int __devexit powernowk8_cpu_exit (struct cpufreq_policy *pol)
static unsigned int powernowk8_get (unsigned int cpu)
{
struct powernow_k8_data *data = powernow_data[cpu];
struct powernow_k8_data *data;
cpumask_t oldmask = current->cpus_allowed;
unsigned int khz = 0;
data = powernow_data[first_cpu(cpu_core_map[cpu])];
if (!data)
return -EINVAL;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
if (smp_processor_id() != cpu) {
printk(KERN_ERR PFX "limiting to CPU %d failed in powernowk8_get\n", cpu);

View File

@ -182,10 +182,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
#ifndef for_each_cpu_mask
#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
#endif
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
{

View File

@ -173,6 +173,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_SMP
unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif
if (c->cpuid_level > 3) {
static int is_initialized;
@ -205,9 +209,15 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
break;
case 2:
new_l2 = this_leaf.size/1024;
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
index_msb = get_count_order(num_threads_sharing);
l2_id = c->apicid >> index_msb;
break;
case 3:
new_l3 = this_leaf.size/1024;
num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
index_msb = get_count_order(num_threads_sharing);
l3_id = c->apicid >> index_msb;
break;
default:
break;
@ -215,11 +225,19 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
}
}
}
if (c->cpuid_level > 1) {
/*
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
* trace cache
*/
if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
/* supports eax=2 call */
int i, j, n;
int regs[4];
unsigned char *dp = (unsigned char *)regs;
int only_trace = 0;
if (num_cache_leaves != 0 && c->x86 == 15)
only_trace = 1;
/* Number of times to iterate */
n = cpuid_eax(2) & 0xFF;
@ -241,6 +259,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
while (cache_table[k].descriptor != 0)
{
if (cache_table[k].descriptor == des) {
if (only_trace && cache_table[k].cache_type != LVL_TRACE)
break;
switch (cache_table[k].cache_type) {
case LVL_1_INST:
l1i += cache_table[k].size;
@ -266,35 +286,46 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
}
}
}
if (new_l1d)
l1d = new_l1d;
if (new_l1i)
l1i = new_l1i;
if (new_l2)
l2 = new_l2;
if (new_l3)
l3 = new_l3;
if ( trace )
printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
else if ( l1i )
printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
if ( l1d )
printk(", L1 D cache: %dK\n", l1d);
else
printk("\n");
if ( l2 )
printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
if ( l3 )
printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
}
if (new_l1d)
l1d = new_l1d;
if (new_l1i)
l1i = new_l1i;
if (new_l2) {
l2 = new_l2;
#ifdef CONFIG_SMP
cpu_llc_id[cpu] = l2_id;
#endif
}
if (new_l3) {
l3 = new_l3;
#ifdef CONFIG_SMP
cpu_llc_id[cpu] = l3_id;
#endif
}
if (trace)
printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
else if ( l1i )
printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
if (l1d)
printk(", L1 D cache: %dK\n", l1d);
else
printk("\n");
if (l2)
printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
if (l3)
printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
return l2;
}
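The l2_id/l3_id computation above shifts the APIC ID right by get_count_order(num_threads_sharing), so all logical CPUs sharing a cache level collapse onto the same ID. A standalone worked example (a sketch; get_count_order_sketch mirrors the kernel's ceil(log2(n)) helper):

#include <stdio.h>

static int get_count_order_sketch(unsigned int n)	/* ceil(log2(n)) */
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int apicid = 3;		/* 2nd HT sibling of the 2nd core */
	unsigned int num_threads_sharing = 2;	/* L2 shared by two siblings */
	int index_msb = get_count_order_sketch(num_threads_sharing);

	/* both siblings (apicid 2 and 3) yield l2_id 1 */
	printf("l2_id = %u\n", apicid >> index_msb);
	return 0;
}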

View File

@ -381,7 +381,7 @@ static void do_irq_balance(void)
unsigned long imbalance = 0;
cpumask_t allowed_mask, target_cpu_mask, tmp;
for_each_cpu(i) {
for_each_possible_cpu(i) {
int package_index;
CPU_IRQ(i) = 0;
if (!cpu_online(i))
@ -632,7 +632,7 @@ static int __init balanced_irq_init(void)
else
printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
failed:
for_each_cpu(i) {
for_each_possible_cpu(i) {
kfree(irq_cpu_data[i].irq_delta);
irq_cpu_data[i].irq_delta = NULL;
kfree(irq_cpu_data[i].last_irq);
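The pattern behind this and the following for_each_possible_cpu conversions: per-CPU bookkeeping must cover every CPU that may ever come online, not just those online now, so a hotplugged CPU finds its data already set up. In isolation (a sketch; "stats" is a hypothetical NR_CPUS-sized array, not code from this merge):

#include <linux/cpumask.h>
#include <linux/string.h>

static struct { unsigned long irqs; } stats[NR_CPUS];

static void reset_stats(void)
{
	int cpu;

	for_each_possible_cpu(cpu)	/* walks cpu_possible_map */
		memset(&stats[cpu], 0, sizeof(stats[cpu]));
}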

View File

@ -459,26 +459,9 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
return ret;
}
static int microcode_ioctl (struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
/*
* XXX: will be removed after microcode_ctl
* is updated to ignore failure of this ioctl()
*/
case MICROCODE_IOCFREE:
return 0;
default:
return -EINVAL;
}
return -EINVAL;
}
static struct file_operations microcode_fops = {
.owner = THIS_MODULE,
.write = microcode_write,
.ioctl = microcode_ioctl,
.open = microcode_open,
};

View File

@ -138,12 +138,12 @@ static int __init check_nmi_watchdog(void)
if (nmi_watchdog == NMI_LOCAL_APIC)
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
for_each_cpu(cpu)
for_each_possible_cpu(cpu)
prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
for_each_cpu(cpu) {
for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
/* Check cpu_callin_map here because that is set
after the timer is started. */
@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
* Just reset the alert counters, (other CPUs might be
* spinning on locks we hold):
*/
for_each_cpu(i)
for_each_possible_cpu(i)
alert_counter[i] = 0;
/*
@ -529,7 +529,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
* always switch the stack NMI-atomically, it's safe to use
* smp_processor_id().
*/
int sum, cpu = smp_processor_id();
unsigned int sum;
int cpu = smp_processor_id();
sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

View File

@ -46,6 +46,7 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <video/edid.h>

View File

@ -72,6 +72,9 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
/* Core ID of each logical CPU */
int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
@ -440,6 +443,18 @@ static void __devinit smp_callin(void)
static int cpucount;
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
/*
* For perf, we return last level cache shared map.
* TBD: when power saving sched policy is added, we will return
* cpu_core_map when power saving policy is enabled
*/
return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
@ -459,12 +474,16 @@ set_cpu_sibling_map(int cpu)
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
}
} else {
cpu_set(cpu, cpu_sibling_map[cpu]);
}
cpu_set(cpu, c[cpu].llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
@ -472,6 +491,11 @@ set_cpu_sibling_map(int cpu)
}
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_llc_id[cpu] != BAD_APICID &&
cpu_llc_id[cpu] == cpu_llc_id[i]) {
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
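The pairing rule added above places two CPUs in each other's llc_shared_map whenever their cpu_llc_id values (computed in the init_intel_cacheinfo() hunk earlier in this diff) match; cpu_coregroup_map() then hands that map to the SCHED_MC scheduler domain. A standalone illustration of the rule (a sketch using plain arrays instead of cpumask_t):

#include <stdio.h>

int main(void)
{
	int cpu_llc_id[4] = { 0, 0, 1, 1 };	/* 2 cores x 2 HT siblings */
	int cpu = 1, i;

	for (i = 0; i < 4; i++)
		if (cpu_llc_id[cpu] == cpu_llc_id[i])
			printf("cpu %d shares a last-level cache with cpu %d\n",
			       cpu, i);
	return 0;	/* prints CPUs 0 and 1: cpu 1's coregroup */
}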

View File

@ -310,3 +310,5 @@ ENTRY(sys_call_table)
.long sys_pselect6
.long sys_ppoll
.long sys_unshare /* 310 */
.long sys_set_robust_list
.long sys_get_robust_list
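Since sys_unshare occupies slot 310 above, the two new entries land at 311 (set_robust_list) and 312 (get_robust_list) on i386. A user-space probe of the new numbers (a sketch; assumes a kernel built with this table):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	void *head;
	size_t len;

	/* 312 = get_robust_list(pid, &head, &len); pid 0 means "current" */
	long r = syscall(312, 0, &head, &len);

	printf("get_robust_list -> %ld\n", r);
	return 0;
}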

View File

@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/types.h>
#include <asm/timer.h>
#include <asm/smp.h>
@ -45,24 +46,31 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
#define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
static int pmtmr_need_workaround __read_mostly = 1;
/* helper function to safely read the ACPI PM time source */
static inline u32 read_pmtmr(void)
{
u32 v1=0,v2=0,v3=0;
/* It has been reported that on various broken
* chipsets (ICH4, PIIX4 and PIIX4E) the ACPI PM time
* source is not latched, so you must read it multiple
* times to ensure a safe value is read.
*/
do {
v1 = inl(pmtmr_ioport);
v2 = inl(pmtmr_ioport);
v3 = inl(pmtmr_ioport);
} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
|| (v3 > v1 && v3 < v2));
if (pmtmr_need_workaround) {
u32 v1, v2, v3;
/* mask the output to 24 bits */
return v2 & ACPI_PM_MASK;
/* It has been reported that on various broken
* chipsets (ICH4, PIIX4 and PIIX4E) the ACPI PM time
* source is not latched, so you must read it multiple
* times to ensure a safe value is read.
*/
do {
v1 = inl(pmtmr_ioport);
v2 = inl(pmtmr_ioport);
v3 = inl(pmtmr_ioport);
} while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
|| (v3 > v1 && v3 < v2));
/* mask the output to 24 bits */
return v2 & ACPI_PM_MASK;
}
return inl(pmtmr_ioport) & ACPI_PM_MASK;
}
@ -263,6 +271,72 @@ struct init_timer_opts __initdata timer_pmtmr_init = {
.opts = &timer_pmtmr,
};
#ifdef CONFIG_PCI
/*
* PIIX4 Errata:
*
* The power management timer may return improper results when read.
* Although the timer value settles properly after incrementing,
* while incrementing there is a 3 ns window every 69.8 ns where the
* timer value is indeterminate (a 4.2% chance that the data will be
* incorrect when read). As a result, the ACPI free running count up
* timer specification is violated due to erroneous reads.
*/
static int __init pmtmr_bug_check(void)
{
static struct pci_device_id gray_list[] __initdata = {
/* these chipsets may have bug. */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82801DB_0) },
{ },
};
struct pci_dev *dev;
int pmtmr_has_bug = 0;
u8 rev;
if (cur_timer != &timer_pmtmr || !pmtmr_need_workaround)
return 0;
dev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
if (dev) {
pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
/* the bug has been fixed in PIIX4M */
if (rev < 3) {
printk(KERN_WARNING "* Found PM-Timer Bug on this "
"chipset. Due to workarounds for a bug,\n"
"* this time source is slow. Consider trying "
"other time sources (clock=)\n");
pmtmr_has_bug = 1;
}
pci_dev_put(dev);
}
if (pci_dev_present(gray_list)) {
printk(KERN_WARNING "* This chipset may have PM-Timer Bug. Due"
" to workarounds for a bug,\n"
"* this time source is slow. If you are sure your timer"
" does not have\n"
"* this bug, please use \"pmtmr_good\" to disable the "
"workaround\n");
pmtmr_has_bug = 1;
}
if (!pmtmr_has_bug)
pmtmr_need_workaround = 0;
return 0;
}
device_initcall(pmtmr_bug_check);
#endif
static int __init pmtr_good_setup(char *__str)
{
pmtmr_need_workaround = 0;
return 1;
}
__setup("pmtmr_good", pmtr_good_setup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION("Power Management Timer (PMTMR) as primary timing source for x86");
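The consistency test in read_pmtmr() accepts the middle of three reads only when the three values are monotonically ordered (modulo wraparound); a glitched read forces a retry. The same predicate applied to sample values (a standalone sketch with demo numbers, no real port I/O):

#include <stdio.h>

static int inconsistent(unsigned v1, unsigned v2, unsigned v3)
{
	return (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
	    || (v3 > v1 && v3 < v2);
}

int main(void)
{
	printf("%d\n", inconsistent(100, 0xFFFFFF, 102));	/* 1: retry  */
	printf("%d\n", inconsistent(100, 101, 102));		/* 0: accept */
	return 0;
}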

View File

@ -92,22 +92,21 @@ asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
struct notifier_block *i386die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);
ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb)
{
int err = 0;
unsigned long flags;
vmalloc_sync_all();
spin_lock_irqsave(&die_notifier_lock, flags);
err = notifier_chain_register(&i386die_chain, nb);
spin_unlock_irqrestore(&die_notifier_lock, flags);
return err;
return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);
int unregister_die_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&i386die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);
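This hunk (like the ia64 one further down) moves the die chain onto the new atomic notifier API, which handles its own locking; the old die_notifier_lock goes away. A minimal registration sketch against that API (illustrative module-style code, not part of this merge):

#include <linux/notifier.h>

static int my_die_event(struct notifier_block *nb, unsigned long val,
			void *data)
{
	/* inspect val/data (a struct die_args on x86) as needed */
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_event,
};

/* register_die_notifier(&my_die_nb) now lands in
 * atomic_notifier_chain_register(), so no caller-side lock is needed. */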
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
return p > (void *)tinfo &&

View File

@ -1700,7 +1700,7 @@ after_handle_vic_irq(unsigned int irq)
printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
cpu, irq);
for_each_cpu(real_cpu, mask) {
for_each_cpu_mask(real_cpu, mask) {
outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
VIC_PROCESSOR_ID);

View File

@ -31,6 +31,7 @@
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <asm/e820.h>
#include <asm/setup.h>
@ -352,17 +353,6 @@ void __init zone_sizes_init(void)
{
int nid;
/*
* Insert nodes into pgdat_list backward so they appear in order.
* Clobber node 0's links and NULL out pgdat_list before starting.
*/
pgdat_list = NULL;
for (nid = MAX_NUMNODES - 1; nid >= 0; nid--) {
if (!node_online(nid))
continue;
NODE_DATA(nid)->pgdat_next = pgdat_list;
pgdat_list = NODE_DATA(nid);
}
for_each_online_node(nid) {
unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

View File

@ -36,7 +36,7 @@ void show_mem(void)
printk(KERN_INFO "Mem-info:\n");
show_free_areas();
printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for_each_online_pgdat(pgdat) {
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; ++i) {
page = pgdat_page_nr(pgdat, i);

View File

@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
static void free_msrs(void)
{
int i;
for_each_cpu(i) {
for_each_possible_cpu(i) {
kfree(cpu_msrs[i].counters);
cpu_msrs[i].counters = NULL;
kfree(cpu_msrs[i].controls);

View File

@ -30,19 +30,19 @@ extern spinlock_t timerlist_lock;
fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
struct notifier_block *ia64die_chain;
ATOMIC_NOTIFIER_HEAD(ia64die_chain);
int
register_die_notifier(struct notifier_block *nb)
{
return notifier_chain_register(&ia64die_chain, nb);
return atomic_notifier_chain_register(&ia64die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);
int
unregister_die_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&ia64die_chain, nb);
return atomic_notifier_chain_unregister(&ia64die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);

View File

@ -378,31 +378,6 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
return ptr;
}
/**
* pgdat_insert - insert the pgdat into global pgdat_list
* @pgdat: the pgdat for a node.
*/
static void __init pgdat_insert(pg_data_t *pgdat)
{
pg_data_t *prev = NULL, *next;
for_each_pgdat(next)
if (pgdat->node_id < next->node_id)
break;
else
prev = next;
if (prev) {
prev->pgdat_next = pgdat;
pgdat->pgdat_next = next;
} else {
pgdat->pgdat_next = pgdat_list;
pgdat_list = pgdat;
}
return;
}
/**
* memory_less_nodes - allocate and initialize CPU only nodes pernode
* information.
@ -560,7 +535,7 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for_each_online_pgdat(pgdat) {
unsigned long present;
unsigned long flags;
int shared = 0, cached = 0, reserved = 0;
@ -745,11 +720,5 @@ void __init paging_init(void)
pfn_offset, zholes_size);
}
/*
* Make memory less nodes become a member of the known nodes.
*/
for_each_node_mask(node, memory_less_mask)
pgdat_insert(mem_data[node].pgdat);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

View File

@ -600,7 +600,7 @@ mem_init (void)
kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
kclist_add(&kcore_kernel, _stext, _end - _stext);
for_each_pgdat(pgdat)
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);

View File

@ -93,19 +93,22 @@ static int coherence_id_open(struct inode *inode, struct file *file)
static struct proc_dir_entry
*sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent,
int (*openfunc)(struct inode *, struct file *),
int (*releasefunc)(struct inode *, struct file *))
int (*releasefunc)(struct inode *, struct file *),
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *))
{
struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
if (e) {
e->proc_fops = (struct file_operations *)kmalloc(
sizeof(struct file_operations), GFP_KERNEL);
if (e->proc_fops) {
memset(e->proc_fops, 0, sizeof(struct file_operations));
e->proc_fops->open = openfunc;
e->proc_fops->read = seq_read;
e->proc_fops->llseek = seq_lseek;
e->proc_fops->release = releasefunc;
struct file_operations *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (f) {
f->open = openfunc;
f->read = seq_read;
f->llseek = seq_lseek;
f->release = releasefunc;
f->write = write;
e->proc_fops = f;
}
}
@ -119,31 +122,29 @@ extern int sn_topology_release(struct inode *, struct file *);
void register_sn_procfs(void)
{
static struct proc_dir_entry *sgi_proc_dir = NULL;
struct proc_dir_entry *e;
BUG_ON(sgi_proc_dir != NULL);
if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
return;
sn_procfs_create_entry("partition_id", sgi_proc_dir,
partition_id_open, single_release);
partition_id_open, single_release, NULL);
sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
system_serial_number_open, single_release);
system_serial_number_open, single_release, NULL);
sn_procfs_create_entry("licenseID", sgi_proc_dir,
licenseID_open, single_release);
licenseID_open, single_release, NULL);
e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
sn_force_interrupt_open, single_release);
if (e)
e->proc_fops->write = sn_force_interrupt_write_proc;
sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
sn_force_interrupt_open, single_release,
sn_force_interrupt_write_proc);
sn_procfs_create_entry("coherence_id", sgi_proc_dir,
coherence_id_open, single_release);
coherence_id_open, single_release, NULL);
sn_procfs_create_entry("sn_topology", sgi_proc_dir,
sn_topology_open, sn_topology_release);
sn_topology_open, sn_topology_release, NULL);
}
#endif /* CONFIG_PROC_FS */
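Besides threading the write handler through, the refactor above replaces an open-coded kmalloc()+memset() with kzalloc(). The equivalence in isolation (a sketch, not code from this merge):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct file_operations *alloc_fops_old(void)
{
	struct file_operations *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		memset(f, 0, sizeof(*f));	/* zero by hand */
	return f;
}

static struct file_operations *alloc_fops_new(void)
{
	/* one call allocates and zeroes */
	return kzalloc(sizeof(struct file_operations), GFP_KERNEL);
}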

View File

@ -24,6 +24,7 @@
#include <linux/tty.h>
#include <linux/cpu.h>
#include <linux/nodemask.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/pgtable.h>

View File

@ -13,6 +13,7 @@
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <asm/setup.h>
@ -137,12 +138,6 @@ unsigned long __init zone_sizes_init(void)
int nid, i;
mem_prof_t *mp;
pgdat_list = NULL;
for (nid = num_online_nodes() - 1 ; nid >= 0 ; nid--) {
NODE_DATA(nid)->pgdat_next = pgdat_list;
pgdat_list = NODE_DATA(nid);
}
for_each_online_node(nid) {
mp = &mem_prof[nid];
for (i = 0 ; i < MAX_NR_ZONES ; i++) {

View File

@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/bitops.h>
#include <linux/nodemask.h>
#include <linux/pfn.h>
#include <asm/types.h>
#include <asm/processor.h>
#include <asm/page.h>
@ -47,7 +48,7 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
for_each_online_pgdat(pgdat) {
unsigned long flags;
pgdat_resize_lock(pgdat, &flags);
for (i = 0; i < pgdat->node_spanned_pages; ++i) {

View File

@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
#include <linux/smp_lock.h>
#include <linux/bcd.h>
#include <asm/mvme16xhw.h>
#include <asm/io.h>
@ -31,9 +32,6 @@
* ioctls.
*/
#define BCD2BIN(val) (((val)&15) + ((val)>>4)*10)
#define BIN2BCD(val) ((((val)/10)<<4) + (val)%10)
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
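The macros removed above match the helpers that <linux/bcd.h> now provides centrally. A quick round-trip check of the encoding (standalone sketch):

#include <stdio.h>

#define BCD2BIN(val)	(((val) & 15) + ((val) >> 4) * 10)
#define BIN2BCD(val)	((((val) / 10) << 4) + (val) % 10)

int main(void)
{
	printf("%d\n", BCD2BIN(0x59));	/* 59   */
	printf("%#x\n", BIN2BCD(59));	/* 0x59 */
	return 0;
}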

View File

@ -94,7 +94,7 @@ void __init plat_setup(void)
argptr = prom_getcmdline();
#if defined(CONFIG_SERIAL_AU1X00_CONSOLE) || defined(CONFIG_SERIAL_8250_CONSOLE)
#ifdef CONFIG_SERIAL_8250_CONSOLE
if ((argptr = strstr(argptr, "console=")) == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");

View File

@ -165,6 +165,6 @@ rtc_ds1386_init(unsigned long base)
WRITE_RTC(0xB, byte);
/* set the function pointers */
rtc_get_time = rtc_ds1386_get_time;
rtc_set_time = rtc_ds1386_set_time;
rtc_mips_get_time = rtc_ds1386_get_time;
rtc_mips_set_time = rtc_ds1386_set_time;
}

View File

@ -36,41 +36,13 @@
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/machtype.h>
/*
* Returns true if a clock update is in progress
*/
static inline unsigned char dec_rtc_is_updating(void)
{
unsigned char uip;
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
spin_unlock_irqrestore(&rtc_lock, flags);
return uip;
}
static unsigned long dec_rtc_get_time(void)
{
unsigned int year, mon, day, hour, min, sec, real_year;
int i;
unsigned long flags;
/* The Linux interpretation of the DS1287 clock register contents:
* When the Update-In-Progress (UIP) flag goes from 1 to 0, the
* RTC registers show the second which has precisely just started.
* Let's hope other operating systems interpret the RTC the same way.
*/
/* read RTC exactly on falling edge of update flag */
for (i = 0; i < 1000000; i++) /* may take up to 1 second... */
if (dec_rtc_is_updating())
break;
for (i = 0; i < 1000000; i++) /* must try at least 2.228 ms */
if (!dec_rtc_is_updating())
break;
spin_lock_irqsave(&rtc_lock, flags);
/* Isn't this overkill? UIP above should guarantee consistency */
do {
sec = CMOS_READ(RTC_SECONDS);
min = CMOS_READ(RTC_MINUTES);
@ -78,7 +50,16 @@ static unsigned long dec_rtc_get_time(void)
day = CMOS_READ(RTC_DAY_OF_MONTH);
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
/*
* The PROM will reset the year to either '72 or '73.
* Therefore we store the real year separately, in one
* of the unused BBU RAM locations.
*/
real_year = CMOS_READ(RTC_DEC_YEAR);
} while (sec != CMOS_READ(RTC_SECONDS));
spin_unlock_irqrestore(&rtc_lock, flags);
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = BCD2BIN(sec);
min = BCD2BIN(min);
@ -87,13 +68,7 @@ static unsigned long dec_rtc_get_time(void)
mon = BCD2BIN(mon);
year = BCD2BIN(year);
}
/*
* The PROM will reset the year to either '72 or '73.
* Therefore we store the real year separately, in one
* of the unused BBU RAM locations.
*/
real_year = CMOS_READ(RTC_DEC_YEAR);
spin_unlock_irqrestore(&rtc_lock, flags);
year += real_year - 72 + 2000;
return mktime(year, mon, day, hour, min, sec);
@ -193,8 +168,8 @@ static void dec_ioasic_hpt_init(unsigned int count)
void __init dec_time_init(void)
{
rtc_get_time = dec_rtc_get_time;
rtc_set_mmss = dec_rtc_set_mmss;
rtc_mips_get_time = dec_rtc_get_time;
rtc_mips_set_mmss = dec_rtc_set_mmss;
mips_timer_state = dec_timer_state;
mips_timer_ack = dec_timer_ack;

View File

@ -227,8 +227,8 @@ void __init it8172_time_init(void)
local_irq_restore(flags);
rtc_get_time = it8172_rtc_get_time;
rtc_set_time = it8172_rtc_set_time;
rtc_mips_get_time = it8172_rtc_get_time;
rtc_mips_set_time = it8172_rtc_set_time;
}
#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)

View File

@ -45,9 +45,6 @@ extern void __init prom_init_cmdline(void);
extern unsigned long __init prom_get_memsize(void);
extern void __init it8172_init_ram_resource(unsigned long memsize);
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
const char *get_system_type(void)
{
return "Globespan IVR";

View File

@ -45,9 +45,6 @@ extern void __init prom_init_cmdline(void);
extern unsigned long __init prom_get_memsize(void);
extern void __init it8172_init_ram_resource(unsigned long memsize);
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
const char *get_system_type(void)
{
return "ITE QED-4N-S01B";

View File

@ -159,8 +159,8 @@ rtc_ds1742_init(unsigned long base)
db_assert((rtc_base & 0xe0000000) == KSEG1);
/* set the function pointers */
rtc_get_time = rtc_ds1742_get_time;
rtc_set_time = rtc_ds1742_set_time;
rtc_mips_get_time = rtc_ds1742_get_time;
rtc_mips_set_time = rtc_ds1742_set_time;
/* clear oscillator stop bit */
CMOS_WRITE(RTC_READ, RTC_CONTROL);

View File

@ -34,6 +34,7 @@
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/mmzone.h>
#include <linux/pfn.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@ -257,10 +258,6 @@ static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_en
return 0;
}
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
#define MAXMEM HIGHMEM_START
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
@ -493,10 +490,6 @@ static inline void resource_init(void)
}
}
#undef PFN_UP
#undef PFN_DOWN
#undef PFN_PHYS
#undef MAXMEM
#undef MAXMEM_PFN

View File

@ -65,9 +65,9 @@ static int null_rtc_set_time(unsigned long sec)
return 0;
}
unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
int (*rtc_set_mmss)(unsigned long);
unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
int (*rtc_mips_set_mmss)(unsigned long);
/* usecs per counter cycle, shifted to left by 32 bits */
@ -440,14 +440,14 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/*
* If we have an externally synchronized Linux clock, then update
* CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
* CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
* called as close as possible to 500 ms before the new second starts.
*/
if (ntp_synced() &&
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
if (rtc_set_mmss(xtime.tv_sec) == 0) {
if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
last_rtc_update = xtime.tv_sec;
} else {
/* do it again in 60 s */
@ -565,7 +565,7 @@ asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs)
* b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use fixed_rate_gettimeoffset
* or use cpu counter as timer interrupt source)
* 2) setup xtime based on rtc_get_time().
* 2) setup xtime based on rtc_mips_get_time().
* 3) choose an appropriate gettimeoffset routine.
* 4) calculate a couple of cached variables for later usage
* 5) board_timer_setup() -
@ -633,10 +633,10 @@ void __init time_init(void)
if (board_time_init)
board_time_init();
if (!rtc_set_mmss)
rtc_set_mmss = rtc_set_time;
if (!rtc_mips_set_mmss)
rtc_mips_set_mmss = rtc_mips_set_time;
xtime.tv_sec = rtc_get_time();
xtime.tv_sec = rtc_mips_get_time();
xtime.tv_nsec = 0;
set_normalized_timespec(&wall_to_monotonic,
@ -772,8 +772,8 @@ void to_tm(unsigned long tim, struct rtc_time *tm)
EXPORT_SYMBOL(rtc_lock);
EXPORT_SYMBOL(to_tm);
EXPORT_SYMBOL(rtc_set_time);
EXPORT_SYMBOL(rtc_get_time);
EXPORT_SYMBOL(rtc_mips_set_time);
EXPORT_SYMBOL(rtc_mips_get_time);
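The MIPS hunks above consistently rename the board RTC hooks from rtc_get_time/rtc_set_time to rtc_mips_get_time/rtc_mips_set_time, keeping them out of the way of the generic RTC code. A board hook-up under the new names might look like this sketch (the myboard_* names are hypothetical):

#include <linux/init.h>

/* declared in include/asm-mips/time.h in this tree */
extern unsigned long (*rtc_mips_get_time)(void);
extern int (*rtc_mips_set_time)(unsigned long);

static unsigned long myboard_rtc_get(void)	/* hypothetical */
{
	return 0;	/* would read the board's RTC chip */
}

static int myboard_rtc_set(unsigned long sec)	/* hypothetical */
{
	return 0;	/* would program the board's RTC chip */
}

void __init myboard_time_init(void)
{
	rtc_mips_get_time = myboard_rtc_get;
	rtc_mips_set_time = myboard_rtc_set;
}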
unsigned long long sched_clock(void)
{

View File

@ -165,7 +165,8 @@ void __init plat_setup(void)
/* Set up panic notifier */
for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++)
notifier_chain_register(&panic_notifier_list, &lasat_panic_block[i]);
atomic_notifier_chain_register(&panic_notifier_list,
&lasat_panic_block[i]);
lasat_reboot_setup();
@ -174,8 +175,8 @@ void __init plat_setup(void)
#ifdef CONFIG_DS1603
ds1603 = &ds_defs[mips_machtype];
rtc_get_time = ds1603_read;
rtc_set_time = ds1603_set;
rtc_mips_get_time = ds1603_read;
rtc_mips_set_time = ds1603_set;
#endif
#ifdef DYNAMIC_SERIAL_INIT

View File

@ -65,7 +65,7 @@ void __init plat_setup(void)
board_time_init = mips_time_init;
board_timer_setup = mips_timer_setup;
rtc_get_time = mips_rtc_get_time;
rtc_mips_get_time = mips_rtc_get_time;
}
static void __init serial_init(void)

View File

@ -49,9 +49,6 @@ static char *mtypes[3] = {
/* References to section boundaries */
extern char _end;
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
struct prom_pmemblock * __init prom_getmdesc(void)
{
char *memsize_str;
@ -109,10 +106,10 @@ struct prom_pmemblock * __init prom_getmdesc(void)
mdesc[3].type = yamon_dontuse;
mdesc[3].base = 0x00100000;
mdesc[3].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[3].base;
mdesc[3].size = CPHYSADDR(PAGE_ALIGN(&_end)) - mdesc[3].base;
mdesc[4].type = yamon_free;
mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
mdesc[4].base = CPHYSADDR(PAGE_ALIGN(&_end));
mdesc[4].size = memsize - mdesc[4].base;
return &mdesc[0];

View File

@ -225,5 +225,5 @@ void __init plat_setup(void)
board_time_init = mips_time_init;
board_timer_setup = mips_timer_setup;
rtc_get_time = mips_rtc_get_time;
rtc_mips_get_time = mips_rtc_get_time;
}

View File

@ -42,9 +42,6 @@ static char *mtypes[3] = {
/* References to section boundaries */
extern char _end;
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
struct prom_pmemblock * __init prom_getmdesc(void)
{
unsigned int memsize;
@ -64,10 +61,10 @@ struct prom_pmemblock * __init prom_getmdesc(void)
mdesc[2].type = simmem_reserved;
mdesc[2].base = 0x00100000;
mdesc[2].size = CPHYSADDR(PFN_ALIGN(&_end)) - mdesc[2].base;
mdesc[2].size = CPHYSADDR(PAGE_ALIGN(&_end)) - mdesc[2].base;
mdesc[3].type = simmem_free;
mdesc[3].base = CPHYSADDR(PFN_ALIGN(&_end));
mdesc[3].base = CPHYSADDR(PAGE_ALIGN(&_end));
mdesc[3].size = memsize - mdesc[3].base;
return &mdesc[0];

View File

@ -25,6 +25,7 @@
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
@ -177,9 +178,6 @@ void __init paging_init(void)
free_area_init(zones_size);
}
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
static inline int page_is_ram(unsigned long pagenr)
{
int i;

Some files were not shown because too many files have changed in this diff.