Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu:
  m68k: Revive reporting of spurious interrupts
  m68knommu: Move forward declaration of do_IRQ() from machdep.h to irq.h
  m68k: fix some atomic operation asm address modes for ColdFire
  m68k: use CPU_HAS_NO_BITFIELDS for signal functions
  m68k: merge and clean up delay.h files
  m68knommu: correctly use trap_init
  m68knommu: merge ColdFire 5206 and 5206e platform code
  m68k: merge mmu and non-mmu bitops.h
  m68k: merge MMU and non MMU versions of system.h
  m68k: merge MMU and non-MMU versions of asm/hardirq.h
  m68k: merge the non-mmu and mmu versions of module.c
  m68knommu: Fix printk() format in free_initrd_mem()
  m68knommu: Make empty_zero_page "void *", like on m68k
Linus Torvalds 2011-07-25 22:50:54 -07:00
commit 3b76eefe0f
44 changed files with 1077 additions and 1910 deletions

@@ -41,6 +41,10 @@ config NO_DMA
config ZONE_DMA
bool
default y
config CPU_HAS_NO_BITFIELDS
bool
config HZ
int
default 1000 if CLEOPATRA

@@ -16,6 +16,7 @@ config GENERIC_CLOCKEVENTS
config M68000
bool
select CPU_HAS_NO_BITFIELDS
help
The Freescale (was Motorola) 68000 CPU is the first generation of
the well known M68K family of processors. The CPU core as well as
@@ -25,6 +26,7 @@ config M68000
config MCPU32
bool
select CPU_HAS_NO_BITFIELDS
help
The Freescale (was then Motorola) CPU32 is a CPU core that is
based on the 68020 processor. For the most part it is used in
@@ -34,6 +36,7 @@ config COLDFIRE
bool
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
select CPU_HAS_NO_BITFIELDS
help
The Freescale ColdFire family of processors is a modern derivative
of the 68000 processor family. They are mainly targeted at embedded

@@ -13,7 +13,7 @@ platform-$(CONFIG_M68EZ328) := 68EZ328
platform-$(CONFIG_M68VZ328) := 68VZ328
platform-$(CONFIG_M68360) := 68360
platform-$(CONFIG_M5206) := 5206
platform-$(CONFIG_M5206e) := 5206e
platform-$(CONFIG_M5206e) := 5206
platform-$(CONFIG_M520x) := 520x
platform-$(CONFIG_M523x) := 523x
platform-$(CONFIG_M5249) := 5249

@@ -169,18 +169,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
: "=d" (c), "+m" (*v)
: "id" (i));
: ASM_DI (i));
return c != 0;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
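/*
 * A note on the "id" -> ASM_DI change above, with a minimal sketch of
 * how such a constraint macro could be defined (an assumption for
 * illustration; the actual definition lives elsewhere in the header).
 * ColdFire cannot take an immediate ("i") operand for these ALU
 * instructions, so it is restricted to a data register ("d"), while
 * classic 68k accepts either:
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI "d"	/* data register only on ColdFire */
#else
#define ASM_DI "di"	/* data register or immediate on 68k */
#endif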
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)

@@ -1,5 +1,530 @@
#ifdef __uClinux__
#include "bitops_no.h"
#else
#include "bitops_mm.h"
#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
/*
* Bit access functions vary across the ColdFire and 68k families.
* So we will break them out here, and then macro in the ones we want.
*
* ColdFire - supports standard bset/bclr/bchg with register operand only
* 68000 - supports standard bset/bclr/bchg with memory operand
* >= 68020 - also supports the bfset/bfclr/bfchg instructions
*
* Although it is possible to use only the bset/bclr/bchg with register
* operands on all platforms you end up with larger generated code.
* So we use the best form possible on a given platform.
*/
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,(%0)"
:
: "a" (p), "di" (nr & 7)
: "memory");
}
static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,%0"
: "+m" (*p)
: "di" (nr & 7));
}
static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1{%0:#1}"
:
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bset_mem_set_bit(nr, vaddr) : \
bfset_mem_set_bit(nr, vaddr))
#endif
#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
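/*
 * A worked example of the (nr ^ 31) / 8 addressing used above (an
 * illustrative note, not part of the patch): m68k bitops number bits
 * big-endian within each 32-bit word, so bit 0 of the long word lives
 * in the last byte in memory.  For nr = 5:
 * byte offset = (5 ^ 31) / 8 = 26 / 8 = 3, bit within byte = 5 & 7 = 5.
 * Setting bit 5 of byte 3 is exactly bit 5 of the long word, matching
 * test_bit()'s vaddr[nr >> 5] & (1UL << (nr & 31)).
 */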
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,(%0)"
:
: "a" (p), "di" (nr & 7)
: "memory");
}
static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (*p)
: "di" (nr & 7));
}
static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1{%0:#1}"
:
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bclr_mem_clear_bit(nr, vaddr) : \
bfclr_mem_clear_bit(nr, vaddr))
#endif
#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bchg %1,(%0)"
:
: "a" (p), "di" (nr & 7)
: "memory");
}
static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bchg %1,%0"
: "+m" (*p)
: "di" (nr & 7));
}
static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfchg %1{%0:#1}"
:
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bchg_mem_change_bit(nr, vaddr) : \
bfchg_mem_change_bit(nr, vaddr))
#endif
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
static inline int test_bit(int nr, const unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
static inline int bset_reg_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bset %2,(%1); sne %0"
: "=d" (retval)
: "a" (p), "di" (nr & 7)
: "memory");
return retval;
}
static inline int bset_mem_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bset %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int bfset_mem_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
: "=d" (retval)
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
return retval;
}
#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bset_mem_test_and_set_bit(nr, vaddr) : \
bfset_mem_test_and_set_bit(nr, vaddr))
#endif
#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
static inline int bclr_reg_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bclr %2,(%1); sne %0"
: "=d" (retval)
: "a" (p), "di" (nr & 7)
: "memory");
return retval;
}
static inline int bclr_mem_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bclr %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int bfclr_mem_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
: "=d" (retval)
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
return retval;
}
#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bclr_mem_test_and_clear_bit(nr, vaddr) : \
bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif
#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
static inline int bchg_reg_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bchg %2,(%1); sne %0"
: "=d" (retval)
: "a" (p), "di" (nr & 7)
: "memory");
return retval;
}
static inline int bchg_mem_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bchg %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int bfchg_mem_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
: "=d" (retval)
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
return retval;
}
#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bchg_mem_test_and_change_bit(nr, vaddr) : \
bfchg_mem_test_and_change_bit(nr, vaddr))
#endif
#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
/*
* The true 68020 and more advanced processors support the "bfffo"
* instruction for finding bits. ColdFire and simple 68000 parts
* (including CPU32) do not support this. They simply use the generic
* functions.
*/
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffz.h>
#else
static inline int find_first_zero_bit(const unsigned long *vaddr,
unsigned size)
{
const unsigned long *p = vaddr;
int res = 32;
unsigned int words;
unsigned long num;
if (!size)
return 0;
words = (size + 31) >> 5;
while (!(num = ~*p++)) {
if (!--words)
goto out;
}
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
res ^= 31;
out:
res += ((long)p - (long)vaddr - 4) * 8;
return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
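/*
 * How the bfffo step above works (an explanatory note, not part of the
 * patch): "num & -num" isolates the lowest set bit of num, bfffo
 * returns that bit's offset counted from the most significant bit, and
 * "res ^= 31" converts it back to a least-significant-bit-first index.
 * E.g. num = 0x10: num & -num = 0x10, bfffo yields 27, and 27 ^ 31 = 4.
 */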
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *p = vaddr + (offset >> 5);
int bit = offset & 31UL, res;
if (offset >= size)
return size;
if (bit) {
unsigned long num = ~*p++ & (~0UL << bit);
offset -= bit;
/* Look for zero in first longword */
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
if (res < 32) {
offset += res ^ 31;
return offset < size ? offset : size;
}
offset += 32;
if (offset >= size)
return size;
}
/* No zero yet, search remaining full bytes for a zero */
return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
const unsigned long *p = vaddr;
int res = 32;
unsigned int words;
unsigned long num;
if (!size)
return 0;
words = (size + 31) >> 5;
while (!(num = *p++)) {
if (!--words)
goto out;
}
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
res ^= 31;
out:
res += ((long)p - (long)vaddr - 4) * 8;
return res < size ? res : size;
}
#define find_first_bit find_first_bit
static inline int find_next_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *p = vaddr + (offset >> 5);
int bit = offset & 31UL, res;
if (offset >= size)
return size;
if (bit) {
unsigned long num = *p++ & (~0UL << bit);
offset -= bit;
/* Look for one in first longword */
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
if (res < 32) {
offset += res ^ 31;
return offset < size ? offset : size;
}
offset += 32;
if (offset >= size)
return size;
}
/* No one yet, search remaining full bytes for a one */
return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
static inline unsigned long ffz(unsigned long word)
{
int res;
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (~word & -~word));
return res ^ 31;
}
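/*
 * A usage sketch for ffz() (illustrative; the helper name is made up):
 * callers must rule out the all-ones case first, since the result is
 * undefined when no zero bit exists.
 */
static inline int example_first_free_slot(unsigned long map)
{
	if (map == ~0UL)
		return -1;	/* no free slot */
	return ffz(map);	/* e.g. ffz(0xfffffffe) == 0 */
}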
#endif
#ifdef __KERNEL__
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
/*
* The newer ColdFire family members support a "bitrev" instruction
* and we can use that to implement a fast ffs. Older ColdFire parts,
* and normal 68000 parts don't have anything special, so we use the
* generic functions for those.
*/
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
static inline int __ffs(int x)
{
__asm__ __volatile__ ("bitrev %0; ff1 %0"
: "=d" (x)
: "0" (x));
return x;
}
static inline int ffs(int x)
{
if (!x)
return 0;
return __ffs(x) + 1;
}
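/*
 * Worked example (not part of the patch): for x = 8 (bit 3 set),
 * "bitrev" moves bit 3 up to bit 28, and "ff1" then counts the leading
 * zero bits from bit 31 down, giving 3.  So __ffs(8) == 3 and
 * ffs(8) == 4, matching the generic definitions.
 */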
#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#else
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline int ffs(int x)
{
int cnt;
__asm__ ("bfffo %1{#0:#0},%0"
: "=d" (cnt)
: "dm" (x & -x));
return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
/*
* fls: find last bit set.
*/
static inline int fls(int x)
{
int cnt;
__asm__ ("bfffo %1{#0,#0},%0"
: "=d" (cnt)
: "dm" (x));
return 32 - cnt;
}
static inline int __fls(int x)
{
return fls(x) - 1;
}
#endif
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */

@@ -1,501 +0,0 @@
#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
/*
* Require 68020 or better.
*
* They use the standard big-endian m680x0 bit ordering.
*/
#define test_and_set_bit(nr,vaddr) \
(__builtin_constant_p(nr) ? \
__constant_test_and_set_bit(nr, vaddr) : \
__generic_test_and_set_bit(nr, vaddr))
#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bset %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
return retval;
}
#define set_bit(nr,vaddr) \
(__builtin_constant_p(nr) ? \
__constant_set_bit(nr, vaddr) : \
__generic_set_bit(nr, vaddr))
#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
}
#define test_and_clear_bit(nr,vaddr) \
(__builtin_constant_p(nr) ? \
__constant_test_and_clear_bit(nr, vaddr) : \
__generic_test_and_clear_bit(nr, vaddr))
#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bclr %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
return retval;
}
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
#define clear_bit(nr,vaddr) \
(__builtin_constant_p(nr) ? \
__constant_clear_bit(nr, vaddr) : \
__generic_clear_bit(nr, vaddr))
#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
}
#define test_and_change_bit(nr,vaddr) \
(__builtin_constant_p(nr) ? \
__constant_test_and_change_bit(nr, vaddr) : \
__generic_test_and_change_bit(nr, vaddr))
#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bchg %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
: "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
return retval;
}
#define change_bit(nr,vaddr) \
(__builtin_constant_p(nr) ? \
__constant_change_bit(nr, vaddr) : \
__generic_change_bit(nr, vaddr))
static inline void __constant_change_bit(int nr, unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bchg %1,%0"
: "+m" (*p) : "di" (nr & 7));
}
static inline void __generic_change_bit(int nr, unsigned long *vaddr)
{
__asm__ __volatile__ ("bfchg %1{%0:#1}"
: : "d" (nr^31), "o" (*vaddr) : "memory");
}
static inline int test_bit(int nr, const unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
static inline int find_first_zero_bit(const unsigned long *vaddr,
unsigned size)
{
const unsigned long *p = vaddr;
int res = 32;
unsigned int words;
unsigned long num;
if (!size)
return 0;
words = (size + 31) >> 5;
while (!(num = ~*p++)) {
if (!--words)
goto out;
}
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
res ^= 31;
out:
res += ((long)p - (long)vaddr - 4) * 8;
return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *p = vaddr + (offset >> 5);
int bit = offset & 31UL, res;
if (offset >= size)
return size;
if (bit) {
unsigned long num = ~*p++ & (~0UL << bit);
offset -= bit;
/* Look for zero in first longword */
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
if (res < 32) {
offset += res ^ 31;
return offset < size ? offset : size;
}
offset += 32;
if (offset >= size)
return size;
}
/* No zero yet, search remaining full bytes for a zero */
return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
const unsigned long *p = vaddr;
int res = 32;
unsigned int words;
unsigned long num;
if (!size)
return 0;
words = (size + 31) >> 5;
while (!(num = *p++)) {
if (!--words)
goto out;
}
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
res ^= 31;
out:
res += ((long)p - (long)vaddr - 4) * 8;
return res < size ? res : size;
}
#define find_first_bit find_first_bit
static inline int find_next_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *p = vaddr + (offset >> 5);
int bit = offset & 31UL, res;
if (offset >= size)
return size;
if (bit) {
unsigned long num = *p++ & (~0UL << bit);
offset -= bit;
/* Look for one in first longword */
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
if (res < 32) {
offset += res ^ 31;
return offset < size ? offset : size;
}
offset += 32;
if (offset >= size)
return size;
}
/* No one yet, search remaining full bytes for a one */
return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
static inline unsigned long ffz(unsigned long word)
{
int res;
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (~word & -~word));
return res ^ 31;
}
#ifdef __KERNEL__
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static inline int ffs(int x)
{
int cnt;
asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
/*
* fls: find last bit set.
*/
static inline int fls(int x)
{
int cnt;
asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
return 32 - cnt;
}
static inline int __fls(int x)
{
return fls(x) - 1;
}
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
/* Bitmap functions for the little endian bitmap. */
static inline void __set_bit_le(int nr, void *addr)
{
__set_bit(nr ^ 24, addr);
}
static inline void __clear_bit_le(int nr, void *addr)
{
__clear_bit(nr ^ 24, addr);
}
static inline int __test_and_set_bit_le(int nr, void *addr)
{
return __test_and_set_bit(nr ^ 24, addr);
}
static inline int test_and_set_bit_le(int nr, void *addr)
{
return test_and_set_bit(nr ^ 24, addr);
}
static inline int __test_and_clear_bit_le(int nr, void *addr)
{
return __test_and_clear_bit(nr ^ 24, addr);
}
static inline int test_and_clear_bit_le(int nr, void *addr)
{
return test_and_clear_bit(nr ^ 24, addr);
}
static inline int test_bit_le(int nr, const void *vaddr)
{
const unsigned char *p = vaddr;
return (p[nr >> 3] & (1U << (nr & 7))) != 0;
}
static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
{
const unsigned long *p = vaddr, *addr = vaddr;
int res = 0;
unsigned int words;
if (!size)
return 0;
words = (size >> 5) + ((size & 31) > 0);
while (*p++ == ~0UL) {
if (--words == 0)
goto out;
}
--p;
for (res = 0; res < 32; res++)
if (!test_bit_le(res, p))
break;
out:
res += (p - addr) * 32;
return res < size ? res : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le
static inline unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr;
int bit = offset & 31UL, res;
if (offset >= size)
return size;
p += offset >> 5;
if (bit) {
offset -= bit;
/* Look for zero in first longword */
for (res = bit; res < 32; res++)
if (!test_bit_le(res, p)) {
offset += res;
return offset < size ? offset : size;
}
p++;
offset += 32;
if (offset >= size)
return size;
}
/* No zero yet, search remaining full bytes for a zero */
return offset + find_first_zero_bit_le(p, size - offset);
}
#define find_next_zero_bit_le find_next_zero_bit_le
static inline int find_first_bit_le(const void *vaddr, unsigned size)
{
const unsigned long *p = vaddr, *addr = vaddr;
int res = 0;
unsigned int words;
if (!size)
return 0;
words = (size >> 5) + ((size & 31) > 0);
while (*p++ == 0UL) {
if (--words == 0)
goto out;
}
--p;
for (res = 0; res < 32; res++)
if (test_bit_le(res, p))
break;
out:
res += (p - addr) * 32;
return res < size ? res : size;
}
#define find_first_bit_le find_first_bit_le
static inline unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr;
int bit = offset & 31UL, res;
if (offset >= size)
return size;
p += offset >> 5;
if (bit) {
offset -= bit;
/* Look for one in first longword */
for (res = bit; res < 32; res++)
if (test_bit_le(res, p)) {
offset += res;
return offset < size ? offset : size;
}
p++;
offset += 32;
if (offset >= size)
return size;
}
/* No set bit yet, search remaining full bytes for a set bit */
return offset + find_first_bit_le(p, size - offset);
}
#define find_next_bit_le find_next_bit_le
/* Bitmap functions for the ext2 filesystem. */
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit_le(nr, addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit_le(nr, addr)
#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */

@@ -1,333 +0,0 @@
#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*/
#include <linux/compiler.h>
#include <asm/byteorder.h> /* swab32 */
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#if defined (__mcfisaaplus__) || defined (__mcfisac__)
static inline int ffs(unsigned int val)
{
if (!val)
return 0;
asm volatile(
"bitrev %0\n\t"
"ff1 %0\n\t"
: "=d" (val)
: "0" (val)
);
val++;
return val;
}
static inline int __ffs(unsigned int val)
{
asm volatile(
"bitrev %0\n\t"
"ff1 %0\n\t"
: "=d" (val)
: "0" (val)
);
return val;
}
#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
: "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "d" (nr)
: "%a0", "cc");
#else
__asm__ __volatile__ ("bset %1,%0"
: "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "di" (nr)
: "cc");
#endif
}
#define __set_bit(nr, addr) set_bit(nr, addr)
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
: "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "d" (nr)
: "%a0", "cc");
#else
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "di" (nr)
: "cc");
#endif
}
#define __clear_bit(nr, addr) clear_bit(nr, addr)
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
: "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "d" (nr)
: "%a0", "cc");
#else
__asm__ __volatile__ ("bchg %1,%0"
: "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "di" (nr)
: "cc");
#endif
}
#define __change_bit(nr, addr) change_bit(nr, addr)
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
char retval;
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "d" (nr)
: "%a0");
#else
__asm__ __volatile__ ("bset %2,%1; sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "di" (nr)
/* No clobber */);
#endif
return retval;
}
#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
char retval;
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "d" (nr)
: "%a0");
#else
__asm__ __volatile__ ("bclr %2,%1; sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "di" (nr)
/* No clobber */);
#endif
return retval;
}
#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
char retval;
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "d" (nr)
: "%a0");
#else
__asm__ __volatile__ ("bchg %2,%1; sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
: "di" (nr)
/* No clobber */);
#endif
return retval;
}
#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
/*
* This routine doesn't need to be atomic.
*/
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
int * a = (int *) addr;
int mask;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
return ((mask & *a) != 0);
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
__constant_test_bit((nr),(addr)) : \
__test_bit((nr),(addr)))
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
static inline void __set_bit_le(int nr, void *addr)
{
__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline void __clear_bit_le(int nr, void *addr)
{
__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
static inline int __test_and_set_bit_le(int nr, volatile void *addr)
{
char retval;
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
: "d" (nr)
: "%a0");
#else
__asm__ __volatile__ ("bset %2,%1; sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
: "di" (nr)
/* No clobber */);
#endif
return retval;
}
static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
{
char retval;
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
: "d" (nr)
: "%a0");
#else
__asm__ __volatile__ ("bclr %2,%1; sne %0"
: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
: "di" (nr)
/* No clobber */);
#endif
return retval;
}
#include <asm-generic/bitops/ext2-atomic.h>
static inline int test_bit_le(int nr, const volatile void *addr)
{
char retval;
#ifdef CONFIG_COLDFIRE
__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
: "=d" (retval)
: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
: "%a0");
#else
__asm__ __volatile__ ("btst %2,%1; sne %0"
: "=d" (retval)
: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
/* No clobber */);
#endif
return retval;
}
#define find_first_zero_bit_le(addr, size) \
find_next_zero_bit_le((addr), (size), 0)
static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= 31UL;
if(offset) {
/* We hold the little endian value in tmp, but then the
* shift is illegal. So we could keep a big endian value
* in tmp, like this:
*
* tmp = __swab32(*(p++));
* tmp |= ~0UL >> (32-offset);
*
* but this would decrease performance, so we change the
* shift:
*/
tmp = *(p++);
tmp |= __swab32(~0UL >> (32-offset));
if(size < 32)
goto found_first;
if(~tmp)
goto found_middle;
size -= 32;
result += 32;
}
while(size & ~31UL) {
if(~(tmp = *(p++)))
goto found_middle;
result += 32;
size -= 32;
}
if(!size)
return result;
tmp = *p;
found_first:
/* tmp is little endian, so we would have to swab the shift,
* see above. But then we have to swab tmp below for ffz, so
* we might as well do this here.
*/
return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
return result + ffz(__swab32(tmp));
}
#define find_next_zero_bit_le find_next_zero_bit_le
extern unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset);
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* _M68KNOMMU_BITOPS_H */

@@ -1,5 +1,96 @@
#ifdef __uClinux__
#include "delay_no.h"
#ifndef _M68K_DELAY_H
#define _M68K_DELAY_H
#include <asm/param.h>
/*
* Copyright (C) 1994 Hamish Macdonald
* Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
*/
#if defined(CONFIG_COLDFIRE)
/*
* The ColdFire runs the delay loop at significantly different speeds
* depending upon long word alignment or not. We'll pad it to
* long word alignment which is the faster version.
* The 0x4a8e is of course a 'tstl %fp' instruction. This is better
* than using a NOP (0x4e71) instruction because it executes in one
* cycle, not three, and doesn't allow for an arbitrary delay waiting
* for bus cycles to finish. Also fp/a6 isn't likely to cause a
* stall waiting for the register to become valid if such is added
* to the ColdFire at some stage.
*/
#define DELAY_ALIGN ".balignw 4, 0x4a8e\n\t"
#else
#include "delay_mm.h"
/*
* No instruction alignment required for other m68k types.
*/
#define DELAY_ALIGN
#endif
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__ (
DELAY_ALIGN
"1: subql #1,%0\n\t"
"jcc 1b"
: "=d" (loops)
: "0" (loops));
}
extern void __bad_udelay(void);
#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
/*
* The simpler m68k and ColdFire processors do not have a 32*32->64
* multiply instruction. So we need to handle them a little differently.
* We use a bit of shifting and a single 32*32->32 multiply to get close.
* This is a macro so that the const version can factor out the first
* multiply and shift.
*/
#define HZSCALE (268435456 / (1000000 / HZ))
#define __const_udelay(u) \
__delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
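/*
 * Why the shifts work (an explanatory note, not part of the patch):
 * HZSCALE is 2^28 * HZ / 10^6, and the shifts 11 + 11 + 6 divide by
 * 2^28 again, so
 *
 *	loops ~= u * (2^28 * HZ / 10^6) * loops_per_jiffy / 2^28
 *	      =  u * HZ * loops_per_jiffy / 10^6
 *
 * which is the number of delay loops in "u" microseconds, since
 * loops_per_jiffy * HZ is loops per second.  Splitting the shifts
 * keeps the intermediate products within 32 bits.
 */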
#else
static inline void __xdelay(unsigned long xloops)
{
unsigned long tmp;
__asm__ ("mulul %2,%0:%1"
: "=d" (xloops), "=d" (tmp)
: "d" (xloops), "1" (loops_per_jiffy));
__delay(xloops * HZ);
}
/*
* The definition of __const_udelay is specifically made a macro so that
* the const factor (4295 = 2**32 / 1000000) can be optimized out when
* the delay is a const.
*/
#define __const_udelay(n) (__xdelay((n) * 4295))
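/*
 * The 4295 factor is 2^32 / 10^6 (rounded), and __xdelay()'s mulul
 * keeps only the high 32 bits of the 64-bit product, effectively
 * dividing by 2^32, so (explanatory note, not part of the patch):
 *
 *	loops ~= n * (2^32 / 10^6) * loops_per_jiffy / 2^32 * HZ
 *	      =  n * loops_per_jiffy * HZ / 10^6
 */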
#endif
static inline void __udelay(unsigned long usecs)
{
__const_udelay(usecs);
}
/*
* Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
* a constant)
*/
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
#endif /* defined(_M68K_DELAY_H) */

@@ -1,57 +0,0 @@
#ifndef _M68K_DELAY_H
#define _M68K_DELAY_H
#include <asm/param.h>
/*
* Copyright (C) 1994 Hamish Macdonald
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
*/
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
: "=d" (loops) : "0" (loops));
}
extern void __bad_udelay(void);
/*
* Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
* a constant)
*/
static inline void __const_udelay(unsigned long xloops)
{
unsigned long tmp;
__asm__ ("mulul %2,%0:%1"
: "=d" (xloops), "=d" (tmp)
: "d" (xloops), "1" (loops_per_jiffy));
__delay(xloops * HZ);
}
static inline void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 4295); /* 2**32 / 1000000 */
}
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
__udelay(n))
static inline unsigned long muldiv(unsigned long a, unsigned long b,
unsigned long c)
{
unsigned long tmp;
__asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
: "=d" (tmp), "=d" (a)
: "d" (b), "d" (c), "1" (a));
return a;
}
#endif /* defined(_M68K_DELAY_H) */

@@ -1,76 +0,0 @@
#ifndef _M68KNOMMU_DELAY_H
#define _M68KNOMMU_DELAY_H
/*
* Copyright (C) 1994 Hamish Macdonald
* Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
*/
#include <asm/param.h>
static inline void __delay(unsigned long loops)
{
#if defined(CONFIG_COLDFIRE)
/* The coldfire runs this loop at significantly different speeds
* depending upon long word alignment or not. We'll pad it to
* long word alignment which is the faster version.
* The 0x4a8e is of course a 'tstl %fp' instruction. This is better
* than using a NOP (0x4e71) instruction because it executes in one
* cycle not three and doesn't allow for an arbitrary delay waiting
* for bus cycles to finish. Also fp/a6 isn't likely to cause a
* stall waiting for the register to become valid if such is added
* to the coldfire at some stage.
*/
__asm__ __volatile__ ( ".balignw 4, 0x4a8e\n\t"
"1: subql #1, %0\n\t"
"jcc 1b"
: "=d" (loops) : "0" (loops));
#else
__asm__ __volatile__ ( "1: subql #1, %0\n\t"
"jcc 1b"
: "=d" (loops) : "0" (loops));
#endif
}
/*
* Ideally we use a 32*32->64 multiply to calculate the number of
* loop iterations, but the older standard 68k and ColdFire do not
* have this instruction. So for them we have a close approximation
* loop using 32*32->32 multiplies only. This calculation is based on
* the ARM version of delay.
*
* We want to implement:
*
* loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
*/
#define HZSCALE (268435456 / (1000000/HZ))
extern unsigned long loops_per_jiffy;
static inline void _udelay(unsigned long usecs)
{
#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
defined(CONFIG_COLDFIRE)
__delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
#else
unsigned long tmp;
usecs *= 4295; /* 2**32 / 1000000 */
__asm__ ("mulul %2,%0:%1"
: "=d" (usecs), "=d" (tmp)
: "d" (usecs), "1" (loops_per_jiffy*HZ));
__delay(usecs);
#endif
}
/*
* Moved the udelay() function into library code, no longer inlined.
* I had to change the algorithm because we are overflowing now on
* the faster ColdFire parts. The code is a little bigger, so it makes
* sense to library it.
*/
extern void udelay(unsigned long usecs);
#endif /* defined(_M68KNOMMU_DELAY_H) */

@@ -96,11 +96,11 @@
.endm
.macro RDUSP
movel sw_usp,%a2
movel sw_usp,%a3
.endm
.macro WRUSP
movel %a0,sw_usp
movel %a3,sw_usp
.endm
#else /* !CONFIG_COLDFIRE_SW_A7 */
@@ -127,13 +127,13 @@
.endm
.macro RDUSP
/*move %usp,%a2*/
.word 0x4e6a
/*move %usp,%a3*/
.word 0x4e6b
.endm
.macro WRUSP
/*move %a0,%usp*/
.word 0x4e60
/*move %a3,%usp*/
.word 0x4e63
.endm
#endif /* !CONFIG_COLDFIRE_SW_A7 */
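/*
 * A note on the .word values above (illustrative, based on the
 * standard MOVE USP encoding 0x4e60 | direction | register): 0x4e68 + n
 * encodes "move %usp,%an" and 0x4e60 + n encodes "move %an,%usp", so
 * switching the scratch register to %a3 only changes the low three
 * bits: 0x4e6a -> 0x4e6b and 0x4e60 -> 0x4e63.
 */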

@@ -1,5 +1,34 @@
#ifdef __uClinux__
#include "hardirq_no.h"
#else
#include "hardirq_mm.h"
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
#include <linux/threads.h>
#include <linux/cache.h>
#include <asm/irq.h>
#define HARDIRQ_BITS 8
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#ifdef CONFIG_MMU
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#else
#include <asm-generic/hardirq.h>
#endif /* !CONFIG_MMU */
#endif

@@ -1,16 +0,0 @@
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
#include <linux/threads.h>
#include <linux/cache.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define HARDIRQ_BITS 8
#endif

@@ -1,19 +0,0 @@
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
#include <asm/irq.h>
#define HARDIRQ_BITS 8
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#include <asm-generic/hardirq.h>
#endif /* __M68K_HARDIRQ_H */

@@ -32,15 +32,6 @@
#include <linux/irqreturn.h>
#include <linux/spinlock_types.h>
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
* Interrupt source definitions
* General interrupt sources are the level 1-7.
@@ -131,4 +122,6 @@ asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
#define irq_canonicalize(irq) (irq)
#endif /* CONFIG_MMU */
asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
#endif /* _M68K_IRQ_H_ */

@@ -40,6 +40,5 @@ extern unsigned long hw_timer_offset(void);
extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
extern void config_BSP(char *command, int len);
extern void do_IRQ(int irq, struct pt_regs *fp);
#endif /* _M68K_MACHDEP_H */

@@ -1,18 +1,6 @@
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
#ifdef CONFIG_MMU
struct mod_arch_specific {
struct m68k_fixup_info *fixup_start, *fixup_end;
};
#define MODULE_ARCH_INIT { \
.fixup_start = __start_fixup, \
.fixup_end = __stop_fixup, \
}
enum m68k_fixup_type {
m68k_fixup_memoffset,
m68k_fixup_vnode_shift,
@@ -23,24 +11,31 @@ struct m68k_fixup_info {
void *addr;
};
struct mod_arch_specific {
struct m68k_fixup_info *fixup_start, *fixup_end;
};
#ifdef CONFIG_MMU
#define MODULE_ARCH_INIT { \
.fixup_start = __start_fixup, \
.fixup_end = __stop_fixup, \
}
#define m68k_fixup(type, addr) \
" .section \".m68k_fixup\",\"aw\"\n" \
" .long " #type "," #addr "\n" \
" .previous\n"
#endif /* CONFIG_MMU */
extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
struct module;
extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end);
#else
struct mod_arch_specific {
};
#endif /* CONFIG_MMU */
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr

@@ -150,7 +150,7 @@ typedef struct sigaltstack {
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#ifndef __uClinux__
#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
#define __HAVE_ARCH_SIG_BITOPS
static inline void sigaddset(sigset_t *set, int _sig)
@@ -199,15 +199,14 @@ static inline int sigfindinword(unsigned long word)
return word ^ 31;
}
#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
#ifdef __uClinux__
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#else
struct pt_regs;
extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
#else
#undef __HAVE_ARCH_SIG_BITOPS
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __uClinux__ */
#endif /* __KERNEL__ */
#endif /* __KERNEL__ */
#endif /* _M68K_SIGNAL_H */

@@ -1,5 +1,193 @@
#ifdef __uClinux__
#include "system_no.h"
#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/segment.h>
#include <asm/entry.h>
#ifdef __KERNEL__
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
* also clears the TS-flag if the task we switched to has used the
* math co-processor latest.
*/
/*
* switch_to() saves the extra registers, that are not saved
* automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
* a0-a1. Some of these are used by schedule() and its predecessors
* and so we might see unexpected behaviors when a task returns
* with unexpected register values.
*
* syscall stores these registers itself and none of them are used
* by syscall after the function in the syscall has been called.
*
* Beware that resume now expects *next to be in d1 and the offset of
* tss to be in a1. This saves a few instructions as we no longer have
* to push them onto the stack and read them back right after.
*
* 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
*
* Changed 96/09/19 by Andreas Schwab
* pass prev in a0, next in a1
*/
asmlinkage void resume(void);
#define switch_to(prev,next,last) do { \
register void *_prev __asm__ ("a0") = (prev); \
register void *_next __asm__ ("a1") = (next); \
register void *_last __asm__ ("d1"); \
__asm__ __volatile__("jbsr resume" \
: "=a" (_prev), "=a" (_next), "=d" (_last) \
: "0" (_prev), "1" (_next) \
: "d0", "d2", "d3", "d4", "d5"); \
(last) = _last; \
} while (0)
/*
* Force strict CPU ordering.
* Not really required on m68k...
*/
#define nop() do { asm volatile ("nop"); barrier(); } while (0)
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define read_barrier_depends() ((void)0)
#define set_mb(var, value) ({ (var) = (value); wmb(); })
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() ((void)0)
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
unsigned long flags, tmp;
local_irq_save(flags);
switch (size) {
case 1:
tmp = *(u8 *)ptr;
*(u8 *)ptr = x;
x = tmp;
break;
case 2:
tmp = *(u16 *)ptr;
*(u16 *)ptr = x;
x = tmp;
break;
case 4:
tmp = *(u32 *)ptr;
*(u32 *)ptr = x;
x = tmp;
break;
default:
BUG();
}
local_irq_restore(flags);
return x;
}
#else
#include "system_mm.h"
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 1:
__asm__ __volatile__
("moveb %2,%0\n\t"
"1:\n\t"
"casb %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 2:
__asm__ __volatile__
("movew %2,%0\n\t"
"1:\n\t"
"casw %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 4:
__asm__ __volatile__
("movel %2,%0\n\t"
"1:\n\t"
"casl %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
return x;
}
#endif
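/*
 * A usage sketch (illustrative; the helper name is made up): xchg()
 * returns the previous value, which is enough for a trivial
 * test-and-set style lock flag.
 */
static inline int example_try_lock(volatile unsigned long *flag)
{
	/* nonzero means the flag was already set by someone else */
	return xchg(flag, 1UL) != 0;
}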
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#ifdef CONFIG_RMW_INSNS
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 1:
__asm__ __volatile__ ("casb %0,%2,%1"
: "=d" (old), "=m" (*(char *)p)
: "d" (new), "0" (old), "m" (*(char *)p));
break;
case 2:
__asm__ __volatile__ ("casw %0,%2,%1"
: "=d" (old), "=m" (*(short *)p)
: "d" (new), "0" (old), "m" (*(short *)p));
break;
case 4:
__asm__ __volatile__ ("casl %0,%2,%1"
: "=d" (old), "=m" (*(int *)p)
: "d" (new), "0" (old), "m" (*(int *)p));
break;
}
return old;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
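/*
 * A usage sketch (illustrative; the helper name is made up): the
 * classic compare-and-swap retry loop, here building an atomic add
 * out of cmpxchg().
 */
static inline void example_atomic_add(volatile int *p, int n)
{
	int old, new;

	do {
		old = *p;
		new = old + n;
	} while (cmpxchg(p, old, new) != old);
}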
#else
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#include <asm-generic/cmpxchg.h>
#endif
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */
#endif /* _M68K_SYSTEM_H */

@@ -1,193 +0,0 @@
#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/segment.h>
#include <asm/entry.h>
#ifdef __KERNEL__
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
* also clears the TS-flag if the task we switched to has used the
* math co-processor latest.
*/
/*
* switch_to() saves the extra registers, that are not saved
* automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
* a0-a1. Some of these are used by schedule() and its predecessors
* and so we might see unexpected behaviors when a task returns
* with unexpected register values.
*
* syscall stores these registers itself and none of them are used
* by syscall after the function in the syscall has been called.
*
* Beware that resume now expects *next to be in d1 and the offset of
* tss to be in a1. This saves a few instructions as we no longer have
* to push them onto the stack and read them back right after.
*
* 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
*
* Changed 96/09/19 by Andreas Schwab
* pass prev in a0, next in a1
*/
asmlinkage void resume(void);
#define switch_to(prev,next,last) do { \
register void *_prev __asm__ ("a0") = (prev); \
register void *_next __asm__ ("a1") = (next); \
register void *_last __asm__ ("d1"); \
__asm__ __volatile__("jbsr resume" \
: "=a" (_prev), "=a" (_next), "=d" (_last) \
: "0" (_prev), "1" (_next) \
: "d0", "d2", "d3", "d4", "d5"); \
(last) = _last; \
} while (0)
/*
* Force strict CPU ordering.
* Not really required on m68k...
*/
#define nop() do { asm volatile ("nop"); barrier(); } while (0)
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define read_barrier_depends() ((void)0)
#define set_mb(var, value) ({ (var) = (value); wmb(); })
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() ((void)0)
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
unsigned long flags, tmp;
local_irq_save(flags);
switch (size) {
case 1:
tmp = *(u8 *)ptr;
*(u8 *)ptr = x;
x = tmp;
break;
case 2:
tmp = *(u16 *)ptr;
*(u16 *)ptr = x;
x = tmp;
break;
case 4:
tmp = *(u32 *)ptr;
*(u32 *)ptr = x;
x = tmp;
break;
default:
BUG();
}
local_irq_restore(flags);
return x;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 1:
__asm__ __volatile__
("moveb %2,%0\n\t"
"1:\n\t"
"casb %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 2:
__asm__ __volatile__
("movew %2,%0\n\t"
"1:\n\t"
"casw %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 4:
__asm__ __volatile__
("movel %2,%0\n\t"
"1:\n\t"
"casl %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
return x;
}
#endif
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#ifdef CONFIG_RMW_INSNS
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 1:
__asm__ __volatile__ ("casb %0,%2,%1"
: "=d" (old), "=m" (*(char *)p)
: "d" (new), "0" (old), "m" (*(char *)p));
break;
case 2:
__asm__ __volatile__ ("casw %0,%2,%1"
: "=d" (old), "=m" (*(short *)p)
: "d" (new), "0" (old), "m" (*(short *)p));
break;
case 4:
__asm__ __volatile__ ("casl %0,%2,%1"
: "=d" (old), "=m" (*(int *)p)
: "d" (new), "0" (old), "m" (*(int *)p));
break;
}
return old;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#else
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#include <asm-generic/cmpxchg.h>
#endif
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */
#endif /* _M68K_SYSTEM_H */

@@ -1,153 +0,0 @@
#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/segment.h>
#include <asm/entry.h>
/*
* switch_to(n) should switch tasks to task ptr, first checking that
* ptr isn't the current task, in which case it does nothing. This
* also clears the TS-flag if the task we switched to has used the
* math co-processor latest.
*/
/*
* switch_to() saves the extra registers, that are not saved
* automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
* a0-a1. Some of these are used by schedule() and its predecessors
* and so we might see unexpected behaviors when a task returns
* with unexpected register values.
*
* syscall stores these registers itself and none of them are used
* by syscall after the function in the syscall has been called.
*
* Beware that resume now expects *next to be in d1 and the offset of
* tss to be in a1. This saves a few instructions as we no longer have
* to push them onto the stack and read them back right after.
*
* 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
*
* Changed 96/09/19 by Andreas Schwab
* pass prev in a0, next in a1, offset of tss in d1, and whether
* the mm structures are shared in d2 (to avoid atc flushing).
*/
asmlinkage void resume(void);
#define switch_to(prev,next,last) \
{ \
void *_last; \
__asm__ __volatile__( \
"movel %1, %%a0\n\t" \
"movel %2, %%a1\n\t" \
"jbsr resume\n\t" \
"movel %%d1, %0\n\t" \
: "=d" (_last) \
: "d" (prev), "d" (next) \
: "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
(last) = _last; \
}
#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
/*
* Force strict CPU ordering.
* Not really required on m68k...
*/
#define nop() asm volatile ("nop"::)
#define mb() asm volatile ("" : : :"memory")
#define rmb() asm volatile ("" : : :"memory")
#define wmb() asm volatile ("" : : :"memory")
#define set_mb(var, value) ({ (var) = (value); wmb(); })
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() ((void)0)
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
unsigned long tmp, flags;
local_irq_save(flags);
switch (size) {
case 1:
__asm__ __volatile__
("moveb %2,%0\n\t"
"moveb %1,%2"
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 2:
__asm__ __volatile__
("movew %2,%0\n\t"
"movew %1,%2"
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 4:
__asm__ __volatile__
("movel %2,%0\n\t"
"movel %1,%2"
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
local_irq_restore(flags);
return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 1:
__asm__ __volatile__
("moveb %2,%0\n\t"
"1:\n\t"
"casb %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 2:
__asm__ __volatile__
("movew %2,%0\n\t"
"1:\n\t"
"casw %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
case 4:
__asm__ __volatile__
("movel %2,%0\n\t"
"1:\n\t"
"casl %0,%1,%2\n\t"
"jne 1b"
: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
return x;
}
#endif
#include <asm-generic/cmpxchg-local.h>
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
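/*
 * Sketch only: cmpxchg_local() supports read-modify-write sequences
 * that are atomic with respect to the current CPU.  The helper below
 * is hypothetical, purely to show the usual retry-loop idiom.
 */
#if 0	/* illustration only, never compiled */
static inline void example_local_inc(unsigned long *p)
{
	unsigned long old;

	do {
		old = *p;	/* snapshot the current value */
	} while (cmpxchg_local(p, old, old + 1) != old);
}
#endif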
#define arch_align_stack(x) (x)
#endif /* _M68KNOMMU_SYSTEM_H */

View file

@@ -22,7 +22,6 @@ extern e_vector vectors[];
asmlinkage void auto_inthandler(void);
asmlinkage void user_inthandler(void);
asmlinkage void bad_inthandler(void);
extern void init_vectors(void);
#endif

View file

@@ -28,3 +28,13 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
set_irq_regs(oldregs);
}
/* The number of spurious interrupts */
atomic_t irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
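/*
 * With the format above, the spurious-interrupt counter appears in
 * /proc/interrupts as a line of the form (the count shown here is
 * illustrative only):
 *
 *	ERR:	        1
 */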

View file

@@ -1,5 +1,129 @@
#ifdef CONFIG_MMU
#include "module_mm.c"
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP printk
#else
#include "module_no.c"
#define DEBUGP(fmt...)
#endif
#ifdef CONFIG_MODULES
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate_add section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
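/*
 * Worked example (all addresses made up): for an R_68K_PC32 entry
 * with addend A = 0, symbol value S = 0x200000 and relocation site
 * P = 0x100004, the word written is
 *
 *	S + A - P = 0x200000 + 0 - 0x100004 = 0x000ffffc
 *
 * i.e. the PC-relative displacement from the site to the symbol,
 * while R_68K_32 stores the absolute value S + A.  The REL variant
 * above differs only in taking the addend from the word already at
 * the relocation site instead of from the rela entry.
 */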
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
return 0;
}
#endif /* CONFIG_MODULES */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end)
{
#ifdef CONFIG_MMU
struct m68k_fixup_info *fixup;
for (fixup = start; fixup < end; fixup++) {
switch (fixup->type) {
case m68k_fixup_memoffset:
*(u32 *)fixup->addr = m68k_memoffset;
break;
case m68k_fixup_vnode_shift:
*(u16 *)fixup->addr += m68k_virt_to_node_shift;
break;
}
}
#endif
}

View file

@@ -1,128 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
#ifdef CONFIG_MODULES
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate_add section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
return 0;
}
#endif /* CONFIG_MODULES */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end)
{
struct m68k_fixup_info *fixup;
for (fixup = start; fixup < end; fixup++) {
switch (fixup->type) {
case m68k_fixup_memoffset:
*(u32 *)fixup->addr = m68k_memoffset;
break;
case m68k_fixup_vnode_shift:
*(u16 *)fixup->addr += m68k_virt_to_node_shift;
break;
}
}
}

View file

@@ -1,92 +0,0 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate_add section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}

View file

@@ -60,10 +60,6 @@ static char const * const vec_names[] = {
"MMU CONFIGURATION ERROR"
};
void __init trap_init(void)
{
}
void die_if_kernel(char *str, struct pt_regs *fp, int nr)
{
if (!(fp->sr & PS_S))

View file

@@ -9,6 +9,6 @@ lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ifdef CONFIG_MMU
lib-y += string.o uaccess.o checksum_mm.o
else
lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o delay.o checksum_no.o
lib-y += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
endif

View file

@@ -1,21 +0,0 @@
/*
* arch/m68knommu/lib/delay.c
*
* (C) Copyright 2004, Greg Ungerer <gerg@snapgear.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <asm/param.h>
#include <asm/delay.h>
EXPORT_SYMBOL(udelay);
void udelay(unsigned long usecs)
{
_udelay(usecs);
}

View file

@@ -42,7 +42,7 @@
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
unsigned long empty_zero_page;
void *empty_zero_page;
extern unsigned long memory_start;
extern unsigned long memory_end;
@@ -62,8 +62,8 @@ void __init paging_init(void)
unsigned long end_mem = memory_end & PAGE_MASK;
unsigned long zones_size[MAX_NR_ZONES] = {0, };
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
memset(empty_zero_page, 0, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
@@ -120,7 +120,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
totalram_pages++;
pages++;
}
printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024));
pr_notice("Freeing initrd memory: %luk freed\n",
pages * (PAGE_SIZE / 1024));
}
#endif
@@ -141,7 +142,7 @@ void free_initmem(void)
free_page(addr);
totalram_pages++;
}
printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
(int)(addr - PAGE_SIZE));

View file

@@ -98,6 +98,12 @@ void m5206_cpu_reset(void)
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
#endif /* CONFIG_NETtel */
mach_reset = m5206_cpu_reset;
m5206_timers_init();
m5206_uarts_init();

View file

@@ -1,18 +0,0 @@
#
# Makefile for the m68knommu linux kernel.
#
#
# If you want to play with the HW breakpoints then you will
# need to define this; it will give you a stack backtrace
# on the console port whenever a DBG interrupt occurs. You have to
# set up your HW breakpoints to trigger a DBG interrupt:
#
# ccflags-y := -DTRAP_DBG_INTERRUPT
# asflags-y := -DTRAP_DBG_INTERRUPT
#
asflags-$(CONFIG_FULLDEBUG) := -DDEBUGGER_COMPATIBLE_CACHE=1
obj-y := config.o gpio.o

View file

@@ -1,127 +0,0 @@
/***************************************************************************/
/*
* linux/arch/m68knommu/platform/5206e/config.c
*
* Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
/***************************************************************************/
static struct mcf_platform_uart m5206e_uart_platform[] = {
{
.mapbase = MCF_MBAR + MCFUART_BASE1,
.irq = 73,
},
{
.mapbase = MCF_MBAR + MCFUART_BASE2,
.irq = 74,
},
{ },
};
static struct platform_device m5206e_uart = {
.name = "mcfuart",
.id = 0,
.dev.platform_data = m5206e_uart_platform,
};
static struct platform_device *m5206e_devices[] __initdata = {
&m5206e_uart,
};
/***************************************************************************/
static void __init m5206e_uart_init_line(int line, int irq)
{
if (line == 0) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
writeb(irq, MCFUART_BASE1 + MCFUART_UIVR);
mcf_mapirq2imr(irq, MCFINTC_UART0);
} else if (line == 1) {
writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI2, MCF_MBAR + MCFSIM_UART2ICR);
writeb(irq, MCFUART_BASE2 + MCFUART_UIVR);
mcf_mapirq2imr(irq, MCFINTC_UART1);
}
}
static void __init m5206e_uarts_init(void)
{
const int nrlines = ARRAY_SIZE(m5206e_uart_platform);
int line;
for (line = 0; (line < nrlines); line++)
m5206e_uart_init_line(line, m5206e_uart_platform[line].irq);
}
/***************************************************************************/
static void __init m5206e_timers_init(void)
{
/* Timer1 is always used as system timer */
writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI3,
MCF_MBAR + MCFSIM_TIMER1ICR);
mcf_mapirq2imr(MCF_IRQ_TIMER, MCFINTC_TIMER1);
#ifdef CONFIG_HIGHPROFILE
/* Timer2 is to be used as a high speed profile timer */
writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL7 | MCFSIM_ICR_PRI3,
MCF_MBAR + MCFSIM_TIMER2ICR);
mcf_mapirq2imr(MCF_IRQ_PROFILER, MCFINTC_TIMER2);
#endif
}
/***************************************************************************/
void m5206e_cpu_reset(void)
{
local_irq_disable();
/* Set watchdog to soft reset, and enable it */
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
for (;;)
/* wait for watchdog to timeout */;
}
/***************************************************************************/
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0xf0004000, size);
commandp[size-1] = 0;
#endif /* CONFIG_NETtel */
mach_reset = m5206e_cpu_reset;
m5206e_timers_init();
m5206e_uarts_init();
/* Only support the external interrupts on their primary level */
mcf_mapirq2imr(25, MCFINTC_EINT1);
mcf_mapirq2imr(28, MCFINTC_EINT4);
mcf_mapirq2imr(31, MCFINTC_EINT7);
}
/***************************************************************************/
static int __init init_BSP(void)
{
platform_add_devices(m5206e_devices, ARRAY_SIZE(m5206e_devices));
return 0;
}
arch_initcall(init_BSP);
/***************************************************************************/

View file

@@ -1,49 +0,0 @@
/*
* Coldfire generic GPIO support
*
* (C) Copyright 2009, Steven King <sfking@fdwdc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfgpio.h>
static struct mcf_gpio_chip mcf_gpio_chips[] = {
{
.gpio_chip = {
.label = "PP",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFSIM_PADDR,
.podr = (void __iomem *) MCFSIM_PADAT,
.ppdr = (void __iomem *) MCFSIM_PADAT,
},
};
static int __init mcf_gpio_init(void)
{
unsigned i = 0;
while (i < ARRAY_SIZE(mcf_gpio_chips))
(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
return 0;
}
core_initcall(mcf_gpio_init);

View file

@@ -162,8 +162,6 @@ void __init init_IRQ(void)
{
int irq, edge;
init_vectors();
/* Mask all interrupt sources */
writel(0x88888888, MCF_MBAR + MCFSIM_ICR1);
writel(0x88888888, MCF_MBAR + MCFSIM_ICR2);

View file

@@ -236,27 +236,26 @@ ret_from_interrupt:
* Handler for uninitialized and spurious interrupts.
*/
ENTRY(bad_interrupt)
addql #1,num_spurious
addql #1,irq_err_count
rte
/*
* Beware - when entering resume, prev (the current task) is
* in a0, next (the new task) is in a1,so don't change these
* in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
*/
ENTRY(resume)
movel %a0,%d1 /* save prev thread in d1 */
movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
movel %usp,%a2 /* save usp */
movel %a2,%a0@(TASK_THREAD+THREAD_USP)
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
movel %usp,%a3 /* save usp */
movel %a3,%a0@(TASK_THREAD+THREAD_USP)
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
movel %a3,%usp
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
RESTORE_SWITCH_STACK
movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
movel %a0,%usp
movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
rts

View file

@@ -70,9 +70,6 @@ asmlinkage irqreturn_t inthandler7(void);
extern e_vector *_ramvec;
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
/* The 68k family did not have a good way to determine the source
 * of interrupts until later in the family.  The EC000 core does
 * not provide the vector number on the stack, so we vector everything
@@ -155,7 +152,7 @@ static struct irq_chip intc_irq_chip = {
* This function should be called during kernel startup to initialize
* the machine vector table.
*/
void __init init_IRQ(void)
void __init trap_init(void)
{
int i;
@@ -172,6 +169,11 @@ void __init init_IRQ(void)
_ramvec[69] = (e_vector) inthandler5;
_ramvec[70] = (e_vector) inthandler6;
_ramvec[71] = (e_vector) inthandler7;
}
void __init init_IRQ(void)
{
int i;
IVR = 0x40; /* Set DragonBall IVR (interrupt base) to 64 */

View file

@@ -157,27 +157,26 @@ ret_from_interrupt:
* Handler for uninitialized and spurious interrupts.
*/
bad_interrupt:
addql #1,num_spurious
addql #1,irq_err_count
rte
/*
* Beware - when entering resume, prev (the current task) is
* in a0, next (the new task) is in a1,so don't change these
* in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
*/
ENTRY(resume)
movel %a0,%d1 /* save prev thread in d1 */
movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
movel %usp,%a2 /* save usp */
movel %a2,%a0@(TASK_THREAD+THREAD_USP)
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
movel %usp,%a3 /* save usp */
movel %a3,%a0@(TASK_THREAD+THREAD_USP)
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
movel %a3,%usp
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
RESTORE_SWITCH_STACK
movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore user stack */
movel %a0,%usp
movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
rts

View file

@@ -34,9 +34,6 @@ asmlinkage void inthandler(void);
extern void *_ramvec[];
/* The number of spurious interrupts */
volatile unsigned int num_spurious;
static void intc_irq_unmask(struct irq_data *d)
{
pquicc->intr_cimr |= (1 << d->irq);
@@ -63,9 +60,8 @@ static struct irq_chip intc_irq_chip = {
* This function should be called during kernel startup to initialize
* the vector table.
*/
void init_IRQ(void)
void __init trap_init(void)
{
int i;
int vba = (CPM_VECTOR_BASE<<4);
/* set up the vectors */
@@ -130,6 +126,11 @@ void init_IRQ(void)
/* turn off all CPM interrupts */
pquicc->intr_cimr = 0x00000000;
}
void init_IRQ(void)
{
int i;
for (i = 0; (i < NR_IRQS); i++) {
irq_set_chip(i, &intc_irq_chip);

View file

@@ -182,21 +182,23 @@ ENTRY(inthandler)
/*
* Beware - when entering resume, prev (the current task) is
* in a0, next (the new task) is in a1,so don't change these
* in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
* This is always called in supervisor mode, so don't bother to save
 * and restore sr; the user process's sr is actually on the stack.
*/
ENTRY(resume)
movel %a0, %d1 /* get prev thread in d1 */
RDUSP
movel %a2,%a0@(TASK_THREAD+THREAD_USP)
movew %sr,%d1 /* save current status */
movew %d1,%a0@(TASK_THREAD+THREAD_SR)
movel %a0,%d1 /* get prev thread in d1 */
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
RESTORE_SWITCH_STACK
RDUSP /* movel %usp,%a3 */
movel %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
movel %a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
WRUSP
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
WRUSP /* movel %a3,%usp */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
movew %a1@(TASK_THREAD+THREAD_SR),%d7 /* restore new status */
movew %d7,%sr
RESTORE_SWITCH_STACK
rts

View file

@@ -194,8 +194,6 @@ void __init init_IRQ(void)
{
int irq;
init_vectors();
/* Mask all interrupt sources */
__raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL);
#ifdef MCFICM_INTC1

View file

@@ -171,8 +171,6 @@ void __init init_IRQ(void)
{
int irq, eirq;
init_vectors();
/* Mask all interrupt sources */
__raw_writeb(0xff, MCFINTC0_SIMR);
if (MCFINTC1_SIMR)

View file

@@ -139,7 +139,6 @@ void __init init_IRQ(void)
{
int irq;
init_vectors();
mcf_maskimr(0xffffffff);
for (irq = 0; (irq < NR_IRQS); irq++) {

View file

@@ -35,21 +35,13 @@ asmlinkage void dbginterrupt_c(struct frame *fp)
extern e_vector *_ramvec;
void set_evector(int vecnum, void (*handler)(void))
{
if (vecnum >= 0 && vecnum <= 255)
_ramvec[vecnum] = handler;
}
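/*
 * Illustration only; the handler name below is hypothetical:
 *
 *	extern void my_dbg_handler(void);
 *	set_evector(64, my_dbg_handler);
 *
 * installs a handler at vector 64 (the first user vector), while
 * out-of-range vector numbers are silently ignored by the bounds
 * check above.
 */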
/***************************************************************************/
/* Assembler routines */
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void system_call(void);
asmlinkage void inthandler(void);
void __init init_vectors(void)
void __init trap_init(void)
{
int i;