More AP / SP bits for the 34K, the Malta bits and things. Still wants a little polishing.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Ralf Baechle 2005-07-14 15:57:16 +00:00
parent 86071b637d
commit e01402b115
18 changed files with 2332 additions and 139 deletions


@ -13,6 +13,22 @@ mainmenu "Linux/MIPS Kernel Configuration"
source "init/Kconfig"
config CPU_MIPS32
bool
default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
config CPU_MIPS64
bool
default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
config CPU_MIPSR1
bool
default y if CPU_MIPS32_R1 || CPU_MIPS64_R1
config CPU_MIPSR2
bool
default y if CPU_MIPS32_R2 || CPU_MIPS64_R2
config SYS_SUPPORTS_32BIT_KERNEL
bool
config SYS_SUPPORTS_64BIT_KERNEL
@ -233,6 +249,7 @@ config MIPS_EV64120
bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)"
depends on EXPERIMENTAL
select DMA_NONCOHERENT
select IRQ_CPU
select HW_HAS_PCI
select MIPS_GT64120
select SYS_SUPPORTS_32BIT_KERNEL
@ -344,6 +361,7 @@ config MIPS_MALTA
select BOOT_ELF32
select HAVE_STD_PC_SERIAL_PORT
select DMA_NONCOHERENT
select IRQ_CPU
select GENERIC_ISA_DMA
select HW_HAS_PCI
select I8259
@ -1277,6 +1295,31 @@ config CPU_HAS_PREFETCH
bool "Enable prefetches" if CPU_SB1 && !CPU_SB1_PASS_2
default y if CPU_MIPS32 || CPU_MIPS64 || CPU_RM7000 || CPU_RM9000 || CPU_R10000
config MIPS_MT
bool "Enable MIPS MT"
config MIPS_VPE_LOADER
bool "VPE loader support."
depends on MIPS_MT
help
Includes a loader for loading an ELF relocatable object
onto another VPE and running it.
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
depends on MIPS_VPE_LOADER
default y
help
The loader can use memory that is present but has been hidden from
Linux using the kernel command line option "mem=xxMB". It's up to
you to ensure that the amount you put in the option plus the space your
program requires is less than or equal to the amount physically present.
# this should possibly be in drivers/char, but it is rather cpu related. Hmmm
config MIPS_VPE_APSP_API
bool "Enable support for AP/SP API (RTLX)"
depends on MIPS_VPE_LOADER
config VTAG_ICACHE
bool "Support for Virtual Tagged I-cache" if CPU_MIPS64 || CPU_MIPS32
default y if CPU_SB1
@ -1335,6 +1378,35 @@ config CPU_HAS_WB
machines which require flushing of write buffers in software. Saying
Y is the safe option; N may result in kernel malfunction and crashes.
menu "MIPSR2 Interrupt handling"
depends on CPU_MIPSR2 && CPU_ADVANCED
config CPU_MIPSR2_IRQ_VI
bool "Vectored interrupt mode"
help
Vectored interrupt mode allows faster dispatching of interrupts.
The board support code needs to be written to take advantage of this
mode. Compatibility code is included to allow the kernel to run on
a CPU that does not support vectored interrupts. It's safe to
say Y here.
config CPU_MIPSR2_IRQ_EI
bool "External interrupt controller mode"
help
External interrupt controller (EIC) mode takes advantage of an external
interrupt controller to allow fast dispatching from many possible interrupt
sources. Say N unless you know that external interrupt support is
required.
config CPU_MIPSR2_SRS
bool "Make shadow set registers available for interrupt handlers"
depends on CPU_MIPSR2_IRQ_VI || CPU_MIPSR2_IRQ_EI
help
Allow the kernel to use shadow register sets for fast interrupts.
Interrupt handlers must be specially written to use shadow sets.
Say N unless you know that shadow register set support is needed.
endmenu
config CPU_HAS_SYNC
bool
depends on !CPU_R3000


@ -34,12 +34,16 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_NO_ISA) += dma-no-isa.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o


@ -147,6 +147,38 @@ NESTED(except_vec_ejtag_debug, 0, sp)
__FINIT
/*
* Vectored interrupt handler.
* This prototype is copied to ebase + n*IntCtl.VS and patched
* to invoke the handler
*/
NESTED(except_vec_vi, 0, sp)
SAVE_SOME
SAVE_AT
.set push
.set noreorder
EXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */
j except_vec_vi_handler
EXPORT(except_vec_vi_ori)
ori v0, 0 /* Patched */
.set pop
END(except_vec_vi)
EXPORT(except_vec_vi_end)
/*
* Common Vectored Interrupt code
* Complete the register saves and invoke the handler which is passed in $v0
*/
NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP
SAVE_STATIC
CLI
move a0, sp
jalr v0
j ret_from_irq
END(except_vec_vi_handler)
/*
* EJTAG debug exception handler.
*/


@ -74,7 +74,7 @@ static void disable_msc_irq(unsigned int irq)
static void level_mask_and_ack_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
if (!cpu_has_ei)
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
}
@ -84,7 +84,7 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
static void edge_mask_and_ack_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
if (!cpu_has_ei)
if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
u32 r;
@ -166,14 +166,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
irq_desc[base+n].handler = &msc_edgeirq_type;
if (cpu_has_ei)
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
irq_desc[base+n].handler = &msc_levelirq_type;
if (cpu_has_ei)
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);


@ -0,0 +1,341 @@
/*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/rtlx.h>
#define RTLX_MAJOR 64
#define RTLX_TARG_VPE 1
struct rtlx_info *rtlx;
static int major;
static char module_name[] = "rtlx";
static inline int spacefree(int read, int write, int size);
static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
} channel_wqs[RTLX_CHANNELS];
static struct irqaction irq;
static int irq_num;
extern void *vpe_get_shared(int index);
static void rtlx_dispatch(struct pt_regs *regs)
{
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
}
irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
irqreturn_t r = IRQ_HANDLED;
int i;
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
if (chan->lx_read != chan->lx_write)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
return r;
}
void dump_rtlx(void)
{
int i;
printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
printk(" rt_state %d lx_state %d buffer_size %d\n",
chan->rt_state, chan->lx_state, chan->buffer_size);
printk(" rt_read %d rt_write %d\n",
chan->rt_read, chan->rt_write);
printk(" lx_read %d lx_write %d\n",
chan->lx_read, chan->lx_write);
printk(" rt_buffer <%s>\n", chan->rt_buffer);
printk(" lx_buffer <%s>\n", chan->lx_buffer);
}
}
/* call when we have the address of the shared structure from the SP side. */
static int rtlx_init(struct rtlx_info *rtlxi)
{
int i;
if (rtlxi->id != RTLX_ID) {
printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
return (-ENOEXEC);
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
}
/* set up for interrupt handling */
memset(&irq, 0, sizeof(struct irqaction));
if (cpu_has_vint) {
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
}
irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
irq.handler = rtlx_interrupt;
irq.flags = SA_INTERRUPT;
irq.name = "RTLX";
irq.dev_id = rtlx;
setup_irq(irq_num, &irq);
rtlx = rtlxi;
return (0);
}
/* only allow one open process at a time to open each channel */
static int rtlx_open(struct inode *inode, struct file *filp)
{
int minor, ret;
struct rtlx_channel *chan;
/* assume only 1 device at the mo. */
minor = MINOR(inode->i_rdev);
if (rtlx == NULL) {
struct rtlx_info **p;
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
printk(" vpe_get_shared is NULL. Has an SP program been loaded?\n");
return (-EFAULT);
}
if (*p == NULL) {
printk(" vpe_shared %p %p\n", p, *p);
return (-EFAULT);
}
if ((ret = rtlx_init(*p)) < 0)
return (ret);
}
chan = &rtlx->channel[minor];
/* already open? */
if (chan->lx_state == RTLX_STATE_OPENED)
return (-EBUSY);
chan->lx_state = RTLX_STATE_OPENED;
return (0);
}
static int rtlx_release(struct inode *inode, struct file *filp)
{
int minor;
minor = MINOR(inode->i_rdev);
rtlx->channel[minor].lx_state = RTLX_STATE_UNUSED;
return (0);
}
static unsigned int rtlx_poll(struct file *file, poll_table * wait)
{
int minor;
unsigned int mask = 0;
struct rtlx_channel *chan;
minor = MINOR(file->f_dentry->d_inode->i_rdev);
chan = &rtlx->channel[minor];
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
/* data available to read? */
if (chan->lx_read != chan->lx_write)
mask |= POLLIN | POLLRDNORM;
/* space to write */
if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size))
mask |= POLLOUT | POLLWRNORM;
return (mask);
}
static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
loff_t * ppos)
{
size_t fl = 0L;
int minor;
struct rtlx_channel *lx;
DECLARE_WAITQUEUE(wait, current);
minor = MINOR(file->f_dentry->d_inode->i_rdev);
lx = &rtlx->channel[minor];
/* data available? */
if (lx->lx_write == lx->lx_read) {
if (file->f_flags & O_NONBLOCK)
return (0); // -EAGAIN makes cat whinge
/* go to sleep */
add_wait_queue(&channel_wqs[minor].lx_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (lx->lx_write == lx->lx_read)
schedule();
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[minor].lx_queue, &wait);
/* back running */
}
/* find out how much in total */
count = min( count,
(size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
/* then how much from the read pointer onwards */
fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
/* and if there is anything left at the beginning of the buffer */
if ( count - fl )
copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
/* update the index */
lx->lx_read += count;
lx->lx_read %= lx->buffer_size;
return (count);
}
static inline int spacefree(int read, int write, int size)
{
if (read == write) {
/* The buffer is never filled completely, so the indexes are equal only
when the buffer is empty, and unequal whenever data is available */
return (size - 1);
}
return ((read + size - write) % size) - 1;
}
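
A quick illustration of the free-space arithmetic above (editorial sketch, not part of the patch): because one byte is always kept unused, read == write can only mean "empty", and a 1024-byte channel never reports more than 1023 writable bytes. Under those assumptions:

/* Standalone sketch mirroring spacefree() above; illustration only. */
#include <stdio.h>

static int spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;	/* empty: one byte always stays unused */
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	printf("%d\n", spacefree(0, 0, 1024));		/* empty buffer -> 1023 */
	printf("%d\n", spacefree(100, 1020, 1024));	/* reader at 100, writer at 1020 -> 103 */
	return 0;
}
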
static ssize_t rtlx_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
int minor;
struct rtlx_channel *rt;
size_t fl;
DECLARE_WAITQUEUE(wait, current);
minor = MINOR(file->f_dentry->d_inode->i_rdev);
rt = &rtlx->channel[minor];
/* any space left... */
if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size))
schedule();
set_current_state(TASK_RUNNING);
remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
}
/* total number of bytes to copy */
count = min( count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
/* if there's any left copy to the beginning of the buffer */
if( count - fl )
copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
rt->rt_write += count;
rt->rt_write %= rt->buffer_size;
return(count);
}
static struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = rtlx_open,
.release = rtlx_release,
.write = rtlx_write,
.read = rtlx_read,
.poll = rtlx_poll
};
static int rtlx_module_init(void)
{
if ((major = register_chrdev(RTLX_MAJOR, module_name, &rtlx_fops)) < 0) {
printk("rtlx_module_init: unable to register device\n");
return (-EBUSY);
}
if (major == 0)
major = RTLX_MAJOR;
return (0);
}
static void rtlx_module_exit(void)
{
unregister_chrdev(major, module_name);
}
module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
MODULE_DESCRIPTION("MIPS RTLX");
MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc");
MODULE_LICENSE("GPL");
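
How a user-space program might drive one of these channels, as a hedged sketch rather than anything this patch provides: the driver only registers character major 64 under the name "rtlx", so the /dev/rtlx0 path and the `mknod /dev/rtlx0 c 64 0` step below are assumptions, and an SP program must already have been loaded through the VPE loader or open() will fail.

/* Hypothetical RTLX channel client; illustration only. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd;
	int fd = open("/dev/rtlx0", O_RDWR);	/* channel 0 = minor 0 on major 64 */

	if (fd < 0) {
		perror("open /dev/rtlx0");
		return 1;
	}

	write(fd, "hello sp\n", 9);		/* lands in the channel's rt_buffer */

	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);	/* drains lx_buffer */
		if (n > 0) {
			buf[n] = '\0';
			printf("SP answered: %s", buf);
		}
	}

	close(fd);
	return 0;
}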


@ -20,6 +20,7 @@
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@ -64,6 +65,9 @@ extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
/*
* This constant is for searching for possible module text segments.
@ -813,6 +817,12 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
asmlinkage void do_default_vi(struct pt_regs *regs)
{
show_regs(regs);
panic("Caught unexpected vectored interrupt.");
}
/*
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
@ -921,7 +931,11 @@ void nmi_exception_handler(struct pt_regs *regs)
while(1) ;
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
/*
* As a side effect of the way this is implemented we're limited
@ -935,13 +949,156 @@ void *set_except_vector(int n, void *addr)
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
*(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 |
*(volatile u32 *)(ebase + 0x200) = 0x08000000 |
(0x03ffffff & (handler >> 2));
flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204);
flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
}
#ifdef CONFIG_CPU_MIPSR2
/*
* Shadow register allocation
* FIXME: SMP...
*/
/* MIPSR2 shadow register sets */
struct shadow_registers {
spinlock_t sr_lock; /* protects sr_allocated */
int sr_supported; /* Number of shadow register sets supported */
int sr_allocated; /* Bitmap of allocated shadow registers */
} shadow_registers;
void mips_srs_init(void)
{
#ifdef CONFIG_CPU_MIPSR2_SRS
shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported);
#else
shadow_registers.sr_supported = 1;
#endif
shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
spin_lock_init(&shadow_registers.sr_lock);
}
int mips_srs_max(void)
{
return shadow_registers.sr_supported;
}
int mips_srs_alloc (void)
{
struct shadow_registers *sr = &shadow_registers;
unsigned long flags;
int set;
spin_lock_irqsave(&sr->sr_lock, flags);
for (set = 0; set < sr->sr_supported; set++) {
if ((sr->sr_allocated & (1 << set)) == 0) {
sr->sr_allocated |= 1 << set;
spin_unlock_irqrestore(&sr->sr_lock, flags);
return set;
}
}
/* None available */
spin_unlock_irqrestore(&sr->sr_lock, flags);
return -1;
}
void mips_srs_free (int set)
{
struct shadow_registers *sr = &shadow_registers;
unsigned long flags;
spin_lock_irqsave(&sr->sr_lock, flags);
sr->sr_allocated &= ~(1 << set);
spin_unlock_irqrestore(&sr->sr_lock, flags);
}
void *set_vi_srs_handler (int n, void *addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
u32 *w;
unsigned char *b;
if (!cpu_has_veic && !cpu_has_vint)
BUG();
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
}
else
handler = (unsigned long) addr;
vi_handlers[n] = (unsigned long) addr;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
if (srs >= mips_srs_max())
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt (n, srs);
}
else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (mips_srs_max() > 1)
change_c0_srsmap (0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and a standard interrupt exit
*/
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
const int handler_len = &except_vec_vi_end - &except_vec_vi;
const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
if (handler_len > VECTORSPACING) {
/*
* Sigh... panicking won't help as the console
* is probably not configured :(
*/
panic ("VECTORSPACING too small");
}
memcpy (b, &except_vec_vi, handler_len);
w = (u32 *)(b + lui_offset);
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler
*
* It is the handler's responsibility to save registers if required
* (e.g. hi/lo) and return from the exception using "eret"
*/
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
*w = 0;
flush_icache_range((unsigned long)b, (unsigned long)(b+8));
}
return (void *)old_handler;
}
void *set_vi_handler (int n, void *addr)
{
return set_vi_srs_handler (n, addr, 0);
}
#endif
/*
* This is used by native signal handling
*/
@ -1016,10 +1173,18 @@ void __init per_cpu_trap_init(void)
if (cpu_has_dsp)
set_c0_status(ST0_MX);
#ifdef CONFIG_CPU_MIPSR2
write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
#endif
/*
* Some MIPS CPUs have a dedicated interrupt vector which reduces the
* interrupt processing overhead. Use it where available.
* Interrupt handling.
*/
if (cpu_has_veic || cpu_has_vint) {
write_c0_ebase (ebase);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl (0x3e0, VECTORSPACING);
}
if (cpu_has_divec)
set_c0_cause(CAUSEF_IV);
@ -1035,13 +1200,41 @@ void __init per_cpu_trap_init(void)
tlb_init();
}
/* Install CPU exception handler */
void __init set_handler (unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
flush_icache_range(ebase + offset, ebase + offset + size);
}
/* Install uncached CPU exception handler */
void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif
memcpy((void *)(uncached_ebase + offset), addr, size);
}
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec_ejtag_debug;
extern char except_vec4;
unsigned long i;
if (cpu_has_veic || cpu_has_vint)
ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
else
ebase = CAC_BASE;
#ifdef CONFIG_CPU_MIPSR2
mips_srs_init();
#endif
per_cpu_trap_init();
/*
@ -1049,7 +1242,7 @@ void __init trap_init(void)
* This will be overridden later as suitable for a particular
* configuration.
*/
memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
@ -1061,8 +1254,8 @@ void __init trap_init(void)
* Copy the EJTAG debug exception vector handler code to its final
* destination.
*/
if (cpu_has_ejtag)
memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80);
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup ();
/*
* Only some CPUs have the watch exceptions.
@ -1071,11 +1264,15 @@ void __init trap_init(void)
set_except_vector(23, handle_watch);
/*
* Some MIPS CPUs have a dedicated interrupt vector which reduces the
* interrupt processing overhead. Use it where available.
* Initialise interrupt handlers
*/
if (cpu_has_divec)
memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8);
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler (i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable for cache parity detection, but does
@ -1122,6 +1319,10 @@ void __init trap_init(void)
//set_except_vector(15, handle_ndc);
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
@ -1146,5 +1347,5 @@ void __init trap_init(void)
signal32_init();
#endif
flush_icache_range(CAC_BASE, CAC_BASE + 0x400);
flush_icache_range(ebase, ebase + 0x400);
}
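
The patch itself only ever installs vectored handlers on shadow set 0. As a hedged sketch of how the CPU_MIPSR2_SRS option could be consumed by board code, using made-up names (my_fast_handler, MYBOARD_INT_FAST, myboard_fast_irq_init); the extern declarations are only there to keep the fragment self-contained:

/* Editorial sketch; not part of the patch. */
#include <linux/init.h>

extern int mips_srs_alloc(void);
extern void mips_srs_free(int set);
extern void *set_vi_srs_handler(int n, void *addr, int srs);

/*
 * A handler bound to a non-zero shadow set is entered straight from the
 * exception vector: it must save anything it clobbers outside its shadow
 * set (e.g. hi/lo) and return with "eret" itself, as the comment in
 * set_vi_srs_handler() notes.  In practice this is hand-written assembler.
 */
extern void my_fast_handler(void);

#define MYBOARD_INT_FAST 4	/* hypothetical CPU interrupt line */

void __init myboard_fast_irq_init(void)
{
	int set = mips_srs_alloc();

	if (set < 0)
		return;	/* no spare set; a set-0 handler would need a C-callable entry */

	set_vi_srs_handler(MYBOARD_INT_FAST, my_fast_handler, set);
}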

File diff suppressed because it is too large.


@ -28,6 +28,8 @@
#include <asm/gt64120.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/generic.h>
@ -224,6 +226,30 @@ void __init kgdb_config (void)
}
#endif
void __init mips_nmi_setup (void)
{
void *base;
extern char except_vec_nmi;
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) :
(void *)(CAC_BASE + 0x380);
memcpy(base, &except_vec_nmi, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
void __init mips_ejtag_setup (void)
{
void *base;
extern char except_vec_ejtag_debug;
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa00) :
(void *)(CAC_BASE + 0x300);
memcpy(base, &except_vec_ejtag_debug, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
void __init prom_init(void)
{
u32 start, map, mask, data;
@ -353,6 +379,9 @@ void __init prom_init(void)
while(1); /* We die here... */
}
#endif
board_nmi_handler_setup = mips_nmi_setup;
board_ejtag_handler_setup = mips_ejtag_setup;
prom_printf("\nLINUX started...\n");
prom_init_cmdline();
prom_meminit();


@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
@ -55,18 +56,30 @@ struct prom_pmemblock * __init prom_getmdesc(void)
{
char *memsize_str;
unsigned int memsize;
char cmdline[CL_SIZE], *ptr;
memsize_str = prom_getenv("memsize");
if (!memsize_str) {
prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
memsize = 0x02000000;
} else {
#ifdef DEBUG
prom_printf("prom_memsize = %s\n", memsize_str);
#endif
memsize = simple_strtol(memsize_str, NULL, 0);
/* Check the command line first for a memsize directive */
strcpy(cmdline, arcs_cmdline);
ptr = strstr(cmdline, "memsize=");
if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
ptr = strstr(ptr, " memsize=");
if (ptr) {
memsize = memparse(ptr + 8, &ptr);
}
else {
/* otherwise look in the environment */
memsize_str = prom_getenv("memsize");
if (!memsize_str) {
prom_printf("memsize not set in boot prom, set to default (32Mb)\n");
memsize = 0x02000000;
} else {
#ifdef DEBUG
prom_printf("prom_memsize = %s\n", memsize_str);
#endif
memsize = simple_strtol(memsize_str, NULL, 0);
}
}
memset(mdesc, 0, sizeof(mdesc));
mdesc[0].type = yamon_dontuse;


@ -29,6 +29,20 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
#ifdef CONFIG_MIPS_ATLAS
#include <asm/mips-boards/atlasint.h>
#define CASCADE_IRQ MIPSCPU_INT_ATLAS
#define CASCADE_DISPATCH atlas_hw0_irqdispatch
#endif
#ifdef CONFIG_MIPS_MALTA
#include <asm/mips-boards/maltaint.h>
#define CASCADE_IRQ MIPSCPU_INT_I8259A
#define CASCADE_DISPATCH malta_hw0_irqdispatch
#endif
#ifdef CONFIG_MIPS_SEAD
#include <asm/mips-boards/seadint.h>
#endif
/* A lot of complication here is taken away because:
*
* 1) We handle one interrupt and return, sitting in a loop and moving across
@ -80,74 +94,62 @@
mfc0 s0, CP0_CAUSE # get irq bits
mfc0 s1, CP0_STATUS # get irq mask
andi s0, ST0_IM # CAUSE.CE may be non-zero!
and s0, s1
/* First we check for r4k counter/timer IRQ. */
andi a0, s0, CAUSEF_IP7
beq a0, zero, 1f
andi a0, s0, CAUSEF_IP2 # delay slot, check hw0 interrupt
/* Wheee, a timer interrupt. */
move a0, sp
jal mips_timer_interrupt
nop
j ret_from_irq
nop
1:
#if defined(CONFIG_MIPS_SEAD)
beq a0, zero, 1f
andi a0, s0, CAUSEF_IP3 # delay slot, check hw1 interrupt
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
.set mips32
clz a0, s0
.set mips0
negu a0
addu a0, 31-CAUSEB_IP
bltz a0, spurious
#else
beq a0, zero, 1f # delay slot, check hw3 interrupt
andi a0, s0, CAUSEF_IP5
beqz s0, spurious
li a0, 7
and t0, s0, 0xf000
sltiu t0, t0, 1
sll t0, 2
subu a0, t0
sll s0, t0
and t0, s0, 0xc000
sltiu t0, t0, 1
sll t0, 1
subu a0, t0
sll s0, t0
and t0, s0, 0x8000
sltiu t0, t0, 1
# sll t0, 0
subu a0, t0
# sll s0, t0
#endif
/* Wheee, combined hardware level zero interrupt. */
#if defined(CONFIG_MIPS_ATLAS)
jal atlas_hw0_irqdispatch
#elif defined(CONFIG_MIPS_MALTA)
jal malta_hw0_irqdispatch
#elif defined(CONFIG_MIPS_SEAD)
jal sead_hw0_irqdispatch
#ifdef CASCADE_IRQ
li a1, CASCADE_IRQ
bne a0, a1, 1f
addu a0, MIPSCPU_INT_BASE
jal CASCADE_DISPATCH
move a0, sp
j ret_from_irq
nop
1:
#else
#error "MIPS board not supported\n"
addu a0, MIPSCPU_INT_BASE
#endif
move a0, sp # delay slot
j ret_from_irq
nop # delay slot
jal do_IRQ
move a1, sp
1:
#if defined(CONFIG_MIPS_SEAD)
beq a0, zero, 1f
andi a0, s0, CAUSEF_IP5 # delay slot, check hw3 interrupt
jal sead_hw1_irqdispatch
move a0, sp # delay slot
j ret_from_irq
nop # delay slot
1:
#endif
#if defined(CONFIG_MIPS_MALTA)
beq a0, zero, 1f # check hw3 (coreHI) interrupt
nop
jal corehi_irqdispatch
move a0, sp
j ret_from_irq
nop
1:
#endif
/*
* Here by mistake? This is possible, what can happen is that by the
* time we take the exception the IRQ pin goes low, so just leave if
* this is the case.
*/
move a1,s0
PRINT("Got interrupt: c0_cause = %08x\n")
mfc0 a1, CP0_EPC
PRINT("c0_epc = %08x\n")
j ret_from_irq
spurious:
j spurious_interrupt
nop
END(mipsIRQ)
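
For readers not fluent in the assembler above, this is roughly the priority decode the rewritten dispatcher performs (an editorial C rendering, not part of the patch): the clz path on MIPS32/MIPS64 CPUs and the shift/sltiu sequence on older ones both reduce the pending, unmasked Cause.IP bits to the highest-priority interrupt line, which is then either handed to the cascade dispatcher or offset by MIPSCPU_INT_BASE before calling do_IRQ().

/* Editorial sketch of the dispatch priority decode; illustration only. */
#define CAUSEB_IP	8		/* as in asm/mipsregs.h: Cause.IP is bits 15..8 */
#define ST0_IM		0x0000ff00	/* as in asm/mipsregs.h: the Status.IM field */

static int decode_cpu_irq(unsigned int cause, unsigned int status)
{
	unsigned int pending = cause & status & ST0_IM;
	int line;

	if (!pending)
		return -1;			/* spurious: the line dropped before we got here */

	for (line = 7; line >= 0; line--)	/* IP7 (the CPU timer) has highest priority */
		if (pending & (1u << (CAUSEB_IP + line)))
			return line;		/* caller adds MIPSCPU_INT_BASE for do_IRQ() */

	return -1;				/* unreachable */
}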


@ -31,22 +31,21 @@
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/mc146818-time.h>
unsigned long cpu_khz;
#if defined(CONFIG_MIPS_SEAD)
#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ5)
#else
#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
#endif
#if defined(CONFIG_MIPS_ATLAS)
static char display_string[] = " LINUX ON ATLAS ";
#endif
@ -59,20 +58,27 @@ static char display_string[] = " LINUX ON SEAD ";
static unsigned int display_count = 0;
#define MAX_DISPLAY_COUNT (sizeof(display_string) - 8)
#define MIPS_CPU_TIMER_IRQ (NR_IRQS-1)
static unsigned int timer_tick_count=0;
static int mips_cpu_timer_irq;
void mips_timer_interrupt(struct pt_regs *regs)
static void mips_timer_dispatch (struct pt_regs *regs)
{
do_IRQ (mips_cpu_timer_irq, regs);
}
irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
irqreturn_t r;
r = timer_interrupt(irq, dev_id, regs);
if ((timer_tick_count++ % HZ) == 0) {
mips_display_message(&display_string[display_count++]);
if (display_count == MAX_DISPLAY_COUNT)
display_count = 0;
display_count = 0;
}
ll_timer_interrupt(MIPS_CPU_TIMER_IRQ, regs);
return r;
}
/*
@ -140,10 +146,8 @@ void __init mips_time_init(void)
local_irq_save(flags);
#if defined(CONFIG_MIPS_ATLAS) || defined(CONFIG_MIPS_MALTA)
/* Set Data mode - binary. */
CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL);
#endif
est_freq = estimate_cpu_frequency ();
@ -157,11 +161,22 @@ void __init mips_time_init(void)
void __init mips_timer_setup(struct irqaction *irq)
{
if (cpu_has_veic) {
set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch);
mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
}
else {
if (cpu_has_vint)
set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch);
mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR;
}
/* we are using the cpu counter for timer interrupts */
irq->handler = no_action; /* we use our own handler */
setup_irq(MIPS_CPU_TIMER_IRQ, irq);
irq->handler = mips_timer_interrupt; /* we use our own handler */
setup_irq(mips_cpu_timer_irq, irq);
/* to generate the first timer interrupt */
write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
set_c0_status(ALLINTS);
}


@ -30,6 +30,7 @@
#include <linux/random.h>
#include <asm/i8259.h>
#include <asm/irq_cpu.h>
#include <asm/io.h>
#include <asm/mips-boards/malta.h>
#include <asm/mips-boards/maltaint.h>
@ -37,8 +38,10 @@
#include <asm/gt64120.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/msc01_pci.h>
#include <asm/msc01_ic.h>
extern asmlinkage void mipsIRQ(void);
extern void mips_timer_interrupt(void);
static DEFINE_SPINLOCK(mips_irq_lock);
@ -91,13 +94,13 @@ static inline int mips_pcibios_iack(void)
return irq;
}
static inline int get_int(int *irq)
static inline int get_int(void)
{
unsigned long flags;
int irq;
spin_lock_irqsave(&mips_irq_lock, flags);
*irq = mips_pcibios_iack();
irq = mips_pcibios_iack();
/*
* IRQ7 is used to detect spurious interrupts.
@ -106,73 +109,82 @@ static inline int get_int(int *irq)
* We can differentiate between this situation and a
* "Normal" IRQ7 by reading the ISR.
*/
if (*irq == 7)
if (irq == 7)
{
outb(PIIX4_OCW3_SEL | PIIX4_OCW3_ISR,
PIIX4_ICTLR1_OCW3);
if (!(inb(PIIX4_ICTLR1_OCW3) & (1 << 7))) {
spin_unlock_irqrestore(&mips_irq_lock, flags);
irq = -1; /* Spurious interrupt */
printk("We got a spurious interrupt from PIIX4.\n");
atomic_inc(&irq_err_count);
return -1; /* Spurious interrupt. */
}
}
spin_unlock_irqrestore(&mips_irq_lock, flags);
return 0;
return irq;
}
void malta_hw0_irqdispatch(struct pt_regs *regs)
{
int irq;
if (get_int(&irq))
return; /* interrupt has already been cleared */
irq = get_int();
if (irq < 0)
return; /* interrupt has already been cleared */
do_IRQ(irq, regs);
do_IRQ(MALTA_INT_BASE+irq, regs);
}
void corehi_irqdispatch(struct pt_regs *regs)
{
unsigned int data,datahi;
/* Mask out corehi interrupt. */
clear_c0_status(IE_IRQ3);
unsigned int intrcause,datalo,datahi;
unsigned int pcimstat, intisr, inten, intpol, intedge, intsteer, pcicmd, pcibadaddr;
printk("CoreHI interrupt, shouldn't happen, so we die here!!!\n");
printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\nbadVaddr : %08lx\n"
, regs->cp0_epc, regs->cp0_status, regs->cp0_cause, regs->cp0_badvaddr);
/* Read all the registers and then print them as there is a
problem with interspersed printk's upsetting the Bonito controller.
Do it for the others too.
*/
switch(mips_revision_corid) {
case MIPS_REVISION_CORID_CORE_MSC:
case MIPS_REVISION_CORID_CORE_FPGA2:
case MIPS_REVISION_CORID_CORE_EMUL_MSC:
case MIPS_REVISION_CORID_CORE_EMUL_MSC:
ll_msc_irq(regs);
break;
case MIPS_REVISION_CORID_QED_RM5261:
case MIPS_REVISION_CORID_CORE_LV:
case MIPS_REVISION_CORID_CORE_FPGA:
case MIPS_REVISION_CORID_CORE_FPGAR2:
data = GT_READ(GT_INTRCAUSE_OFS);
printk("GT_INTRCAUSE = %08x\n", data);
data = GT_READ(GT_CPUERR_ADDRLO_OFS);
intrcause = GT_READ(GT_INTRCAUSE_OFS);
datalo = GT_READ(GT_CPUERR_ADDRLO_OFS);
datahi = GT_READ(GT_CPUERR_ADDRHI_OFS);
printk("GT_CPUERR_ADDR = %02x%08x\n", datahi, data);
printk("GT_INTRCAUSE = %08x\n", intrcause);
printk("GT_CPUERR_ADDR = %02x%08x\n", datahi, datalo);
break;
case MIPS_REVISION_CORID_BONITO64:
case MIPS_REVISION_CORID_CORE_20K:
case MIPS_REVISION_CORID_CORE_EMUL_BON:
data = BONITO_INTISR;
printk("BONITO_INTISR = %08x\n", data);
data = BONITO_INTEN;
printk("BONITO_INTEN = %08x\n", data);
data = BONITO_INTPOL;
printk("BONITO_INTPOL = %08x\n", data);
data = BONITO_INTEDGE;
printk("BONITO_INTEDGE = %08x\n", data);
data = BONITO_INTSTEER;
printk("BONITO_INTSTEER = %08x\n", data);
data = BONITO_PCICMD;
printk("BONITO_PCICMD = %08x\n", data);
pcibadaddr = BONITO_PCIBADADDR;
pcimstat = BONITO_PCIMSTAT;
intisr = BONITO_INTISR;
inten = BONITO_INTEN;
intpol = BONITO_INTPOL;
intedge = BONITO_INTEDGE;
intsteer = BONITO_INTSTEER;
pcicmd = BONITO_PCICMD;
printk("BONITO_INTISR = %08x\n", intisr);
printk("BONITO_INTEN = %08x\n", inten);
printk("BONITO_INTPOL = %08x\n", intpol);
printk("BONITO_INTEDGE = %08x\n", intedge);
printk("BONITO_INTSTEER = %08x\n", intsteer);
printk("BONITO_PCICMD = %08x\n", pcicmd);
printk("BONITO_PCIBADADDR = %08x\n", pcibadaddr);
printk("BONITO_PCIMSTAT = %08x\n", pcimstat);
break;
}
@ -180,8 +192,70 @@ void corehi_irqdispatch(struct pt_regs *regs)
die("CoreHi interrupt", regs);
}
static struct irqaction i8259irq = {
.handler = no_action,
.name = "XT-PIC cascade"
};
static struct irqaction corehi_irqaction = {
.handler = no_action,
.name = "CoreHi"
};
msc_irqmap_t __initdata msc_irqmap[] = {
{MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0},
{MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0},
};
int __initdata msc_nr_irqs = sizeof(msc_irqmap)/sizeof(msc_irqmap_t);
msc_irqmap_t __initdata msc_eicirqmap[] = {
{MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_SW1, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_I8259A, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_SMI, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_COREHI, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_CORELO, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_TMR, MSC01_IRQ_EDGE, 0},
{MSC01E_INT_PCI, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0},
{MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0}
};
int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap)/sizeof(msc_irqmap_t);
void __init arch_init_irq(void)
{
set_except_vector(0, mipsIRQ);
init_i8259_irqs();
if (!cpu_has_veic)
mips_cpu_irq_init (MIPSCPU_INT_BASE);
switch(mips_revision_corid) {
case MIPS_REVISION_CORID_CORE_MSC:
case MIPS_REVISION_CORID_CORE_FPGA2:
case MIPS_REVISION_CORID_CORE_EMUL_MSC:
if (cpu_has_veic)
init_msc_irqs (MSC01E_INT_BASE, msc_eicirqmap, msc_nr_eicirqs);
else
init_msc_irqs (MSC01C_INT_BASE, msc_irqmap, msc_nr_irqs);
}
if (cpu_has_veic) {
set_vi_handler (MSC01E_INT_I8259A, malta_hw0_irqdispatch);
set_vi_handler (MSC01E_INT_COREHI, corehi_irqdispatch);
setup_irq (MSC01E_INT_BASE+MSC01E_INT_I8259A, &i8259irq);
setup_irq (MSC01E_INT_BASE+MSC01E_INT_COREHI, &corehi_irqaction);
}
else if (cpu_has_vint) {
set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch);
setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
}
else {
set_except_vector(0, mipsIRQ);
setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
}
}


@ -1228,8 +1228,7 @@ void __init ld_mmu_r4xx0(void)
struct cpuinfo_mips *c = &current_cpu_data;
/* Default cache error handler for R4000 and R5000 family */
memcpy((void *)(CAC_BASE + 0x100), &except_vec2_generic, 0x80);
memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
set_uncached_handler (0x100, &except_vec2_generic, 0x80);
probe_pcache();
setup_scache();


@ -502,9 +502,8 @@ void ld_mmu_sb1(void)
extern char handle_vec2_sb1;
/* Special cache error handler for SB1 */
memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80);
memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80);
memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);
set_uncached_handler (0x100, &except_vec2_sb1, 0x80);
memcpy((void *)KSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);
probe_cache_sizes();


@ -25,9 +25,63 @@
#ifndef _MIPS_MALTAINT_H
#define _MIPS_MALTAINT_H
/* Number of IRQ supported on hw interrupt 0. */
#define MALTAINT_END 16
/*
* Interrupts 0..15 are used for Malta ISA compatible interrupts
*/
#define MALTA_INT_BASE 0
/*
* Interrupts 16..23 are used for Malta CPU interrupts (non-EIC mode)
*/
#define MIPSCPU_INT_BASE 16
/* CPU interrupt offsets */
#define MIPSCPU_INT_SW0 0
#define MIPSCPU_INT_SW1 1
#define MIPSCPU_INT_MB0 2
#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
#define MIPSCPU_INT_MB1 3
#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
#define MIPSCPU_INT_MB2 4
#define MIPSCPU_INT_MB3 5
#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
#define MIPSCPU_INT_MB4 6
#define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4
#define MIPSCPU_INT_CPUCTR 7
/*
* Interrupts 64..127 are used for Soc-it Classic interrupts
*/
#define MSC01C_INT_BASE 64
/* SOC-it Classic interrupt offsets */
#define MSC01C_INT_TMR 0
#define MSC01C_INT_PCI 1
/*
* Interrupts 64..127 are used for Soc-it EIC interrupts
*/
#define MSC01E_INT_BASE 64
/* SOC-it EIC interrupt offsets */
#define MSC01E_INT_SW0 1
#define MSC01E_INT_SW1 2
#define MSC01E_INT_MB0 3
#define MSC01E_INT_I8259A MSC01E_INT_MB0
#define MSC01E_INT_MB1 4
#define MSC01E_INT_SMI MSC01E_INT_MB1
#define MSC01E_INT_MB2 5
#define MSC01E_INT_MB3 6
#define MSC01E_INT_COREHI MSC01E_INT_MB3
#define MSC01E_INT_MB4 7
#define MSC01E_INT_CORELO MSC01E_INT_MB4
#define MSC01E_INT_TMR 8
#define MSC01E_INT_PCI 9
#define MSC01E_INT_PERFCTR 10
#define MSC01E_INT_CPUCTR 11
#ifndef __ASSEMBLY__
extern void maltaint_init(void);
#endif
#endif /* !(_MIPS_MALTAINT_H) */
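
To make the layered numbering concrete (editorial note, using only the constants above): ISA sources keep IRQs 0..15, the CPU interrupt lines start at 16, and the SOC-it EIC sources start at 64, so the i8259A cascade shows up as Linux IRQ 18 in non-EIC mode and as IRQ 67 in EIC mode, matching the setup_irq() calls in the Malta interrupt code earlier in this commit.

/* Editorial sketch: where Malta sources land in the Linux IRQ space. */
#include <stdio.h>

#define MALTA_INT_BASE		0
#define MIPSCPU_INT_BASE	16
#define MIPSCPU_INT_I8259A	2
#define MSC01E_INT_BASE		64
#define MSC01E_INT_I8259A	3

int main(void)
{
	/* an ISA source such as the keyboard keeps its legacy number */
	printf("ISA IRQ1        -> Linux IRQ %d\n", MALTA_INT_BASE + 1);
	/* the i8259A cascade on CPU interrupt line 2 (non-EIC mode) */
	printf("i8259A cascade  -> Linux IRQ %d\n", MIPSCPU_INT_BASE + MIPSCPU_INT_I8259A);
	/* the same cascade routed through the SOC-it EIC */
	printf("i8259A via EIC  -> Linux IRQ %d\n", MSC01E_INT_BASE + MSC01E_INT_I8259A);
	return 0;
}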


@ -0,0 +1,56 @@
/*
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
*
*/
#ifndef _RTLX_H
#define _RTLX_H
#define LX_NODE_BASE 10
#define MIPSCPU_INT_BASE 16
#define MIPS_CPU_RTLX_IRQ 0
#define RTLX_VERSION 1
#define RTLX_xID 0x12345600
#define RTLX_ID (RTLX_xID | RTLX_VERSION)
#define RTLX_CHANNELS 8
enum rtlx_state {
RTLX_STATE_UNUSED = 0,
RTLX_STATE_INITIALISED,
RTLX_STATE_REMOTE_READY,
RTLX_STATE_OPENED
};
#define RTLX_BUFFER_SIZE 1024
/* Each channel supports read and write.
Linux (VPE0) reads lx_buffer and writes rt_buffer.
The SP (VPE1) reads rt_buffer and writes lx_buffer.
*/
typedef struct rtlx_channel {
enum rtlx_state rt_state;
enum rtlx_state lx_state;
int buffer_size;
/* read and write indexes per buffer */
int rt_write, rt_read;
char *rt_buffer;
int lx_write, lx_read;
char *lx_buffer;
void *queues;
} rtlx_channel_t;
typedef struct rtlx_info {
unsigned long id;
enum rtlx_state state;
struct rtlx_channel channel[RTLX_CHANNELS];
} rtlx_info_t;
#endif


@ -431,6 +431,10 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_vi_srs_handler (int n, void *addr, int regset);
extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);


@ -21,4 +21,7 @@
extern void (*board_be_init)(void);
extern int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
extern void (*board_nmi_handler_setup)(void);
extern void (*board_ejtag_handler_setup)(void);
#endif /* _ASM_TRAPS_H */