
Merge master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6: (78 commits)
  [PARISC] Use symbolic last syscall in __NR_Linux_syscalls
  [PARISC] Add missing statfs64 and fstatfs64 syscalls
  Revert "[PARISC] Optimize TLB flush on SMP systems"
  [PARISC] Compat signal fixes for 64-bit parisc
  [PARISC] Reorder syscalls to match unistd.h
  Revert "[PATCH] make kernel/signal.c:kill_proc_info() static"
  [PARISC] fix sys_rt_sigqueueinfo
  [PARISC] fix section mismatch warnings in harmony sound driver
  [PARISC] do not export get_register/set_register
  [PARISC] add ENTRY()/ENDPROC() and simplify assembly of HP/UX emulation code
  [PARISC] convert to use CONFIG_64BIT instead of __LP64__
  [PARISC] use CONFIG_64BIT instead of __LP64__
  [PARISC] add ASM_EXCEPTIONTABLE_ENTRY() macro
  [PARISC] more ENTRY(), ENDPROC(), END() conversions
  [PARISC] fix ENTRY() and ENDPROC() for 64bit-parisc
  [PARISC] Fixes /proc/cpuinfo cache output on B160L
  [PARISC] implement standard ENTRY(), END() and ENDPROC()
  [PARISC] kill ENTRY_SYS_CPUS
  [PARISC] clean up debugging printks in smp.c
  [PARISC] factor syscall_restart code out of do_signal
  ...

Fix conflict in include/linux/sched.h due to kill_proc_info() being made
publicly available to PARISC again.
Linus Torvalds 2007-02-26 12:48:06 -08:00
commit b0138a6cb7
97 changed files with 1585 additions and 1919 deletions
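The conflict note above ties back to the reverted commit "[PATCH] make kernel/signal.c:kill_proc_info() static": the resolution keeps kill_proc_info() non-static so the PARISC HP-UX compat code can keep calling it. A minimal sketch of what the resolved declaration presumably looks like; the exact prototype is an assumption reconstructed from the 2.6.20-era tree, not text taken from this diff.

/* include/linux/sched.h (sketch): declaration kept visible again.
 * Prototype is assumed, not quoted from this commit's diff. */
struct siginfo;
extern int kill_proc_info(int sig, struct siginfo *info, pid_t pid);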

View File

@ -37,6 +37,11 @@ config GENERIC_FIND_NEXT_BIT
bool
default y
config GENERIC_BUG
bool
default y
depends on BUG
config GENERIC_HWEIGHT
bool
default y
@ -45,6 +50,10 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_TIME
bool
default y
config TIME_LOW_RES
bool
depends on SMP

View File

@ -35,12 +35,8 @@ FINAL_LD=$(CROSS_COMPILE)ld --warn-common --warn-section-align
OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
GCC_VERSION := $(call cc-version)
ifneq ($(shell if [ -z $(GCC_VERSION) ] ; then echo "bad"; fi ;),)
$(error Sorry, couldn't find ($(cc-version)).)
endif
ifneq ($(shell if [ $(GCC_VERSION) -lt 0303 ] ; then echo "bad"; fi ;),)
$(error Sorry, your compiler is too old ($(GCC_VERSION)). GCC v3.3 or above is required.)
ifneq ($(call cc-ifversion, -lt, 0303, "bad"),)
$(error Sorry, GCC v3.3 or above is required.)
endif
cflags-y := -pipe

View File

@ -18,17 +18,16 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/unistd.h>
#include <asm/assembly.h>
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/unistd.h>
#define ENTRY_NAME(_name_) .word _name_
#define ENTRY_NAME(_name_) ASM_ULONG_INSN _name_
.section .rodata,"a"
.align 4
.export hpux_call_table
.import hpux_unimplemented_wrapper
hpux_call_table:
ENTRY(hpux_call_table)
ENTRY_NAME(sys_ni_syscall) /* 0 */
ENTRY_NAME(sys_exit)
ENTRY_NAME(hpux_fork_wrapper)
@ -542,5 +541,6 @@ hpux_call_table:
ENTRY_NAME(hpux_unimplemented_wrapper) /* 510 */
ENTRY_NAME(hpux_unimplemented_wrapper)
ENTRY_NAME(hpux_unimplemented_wrapper)
END(hpux_call_table)
.end

View File

@ -35,13 +35,13 @@ int hpux_execve(struct pt_regs *regs)
int error;
char *filename;
filename = getname((char *) regs->gr[26]);
filename = getname((char __user *) regs->gr[26]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, (char **) regs->gr[25],
(char **)regs->gr[24], regs);
error = do_execve(filename, (char __user * __user *) regs->gr[25],
(char __user * __user *) regs->gr[24], regs);
if (error == 0) {
task_lock(current);
@ -63,19 +63,19 @@ struct hpux_dirent {
};
struct getdents_callback {
struct hpux_dirent *current_dir;
struct hpux_dirent *previous;
struct hpux_dirent __user *current_dir;
struct hpux_dirent __user *previous;
int count;
int error;
};
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
u64 ino, unsigned d_type)
{
struct hpux_dirent * dirent;
struct hpux_dirent __user * dirent;
struct getdents_callback * buf = (struct getdents_callback *) __buf;
ino_t d_ino;
int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
@ -105,10 +105,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
#undef NAME_OFFSET
#undef ROUND_UP
int hpux_getdents(unsigned int fd, struct hpux_dirent *dirent, unsigned int count)
int hpux_getdents(unsigned int fd, struct hpux_dirent __user *dirent, unsigned int count)
{
struct file * file;
struct hpux_dirent * lastdirent;
struct hpux_dirent __user * lastdirent;
struct getdents_callback buf;
int error = -EBADF;
@ -143,7 +143,7 @@ int hpux_mount(const char *fs, const char *path, int mflag,
return -ENOSYS;
}
static int cp_hpux_stat(struct kstat *stat, struct hpux_stat64 *statbuf)
static int cp_hpux_stat(struct kstat *stat, struct hpux_stat64 __user *statbuf)
{
struct hpux_stat64 tmp;
@ -169,7 +169,7 @@ static int cp_hpux_stat(struct kstat *stat, struct hpux_stat64 *statbuf)
return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
long hpux_stat64(char *filename, struct hpux_stat64 *statbuf)
long hpux_stat64(char __user *filename, struct hpux_stat64 __user *statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
@ -180,7 +180,7 @@ long hpux_stat64(char *filename, struct hpux_stat64 *statbuf)
return error;
}
long hpux_fstat64(unsigned int fd, struct hpux_stat64 *statbuf)
long hpux_fstat64(unsigned int fd, struct hpux_stat64 __user *statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
@ -191,7 +191,7 @@ long hpux_fstat64(unsigned int fd, struct hpux_stat64 *statbuf)
return error;
}
long hpux_lstat64(char *filename, struct hpux_stat64 *statbuf)
long hpux_lstat64(char __user *filename, struct hpux_stat64 __user *statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);

View File

@ -12,27 +12,18 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#ifdef __LP64__
.level 2.0w
#else
.level 1.1
#endif
.level LEVEL
.text
#ifdef __LP64__
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64
#endif
.import hpux_call_table
.import hpux_syscall_exit,code
.export hpux_gateway_page
.align 4096
hpux_gateway_page:
ENTRY(hpux_gateway_page)
nop
#ifdef __LP64__
#ifdef CONFIG_64BIT
#warning NEEDS WORK for 64-bit
#endif
ldw -64(%r30), %r29 ;! 8th argument
@ -101,7 +92,7 @@ hpux_gateway_page:
ldo R%hpux_call_table(%r21), %r21
comiclr,>>= __NR_HPUX_syscalls, %r22, %r0
b,n syscall_nosys
ldwx,s %r22(%r21), %r21
LDREGX %r22(%r21), %r21
ldil L%hpux_syscall_exit,%r2
be 0(%sr7,%r21)
ldo R%hpux_syscall_exit(%r2),%r2
@ -110,7 +101,7 @@ syscall_nosys:
ldil L%hpux_syscall_exit,%r1
be R%hpux_syscall_exit(%sr7,%r1)
ldo -ENOSYS(%r0),%r28
ENDPROC(hpux_gateway_page)
.align 4096
.export end_hpux_gateway_page
end_hpux_gateway_page:
ENTRY(end_hpux_gateway_page)

View File

@ -61,7 +61,7 @@ int hpux_ptrace(void)
return -ENOSYS;
}
int hpux_wait(int *stat_loc)
int hpux_wait(int __user *stat_loc)
{
return sys_waitpid(-1, stat_loc, 0);
}
@ -255,7 +255,7 @@ asmlinkage long hpux_fstatfs(unsigned int fd, struct hpux_statfs __user * buf)
/* TODO: Are these put_user calls OK? Should they pass an int?
* (I copied it from sys_i386.c like this.)
*/
static int hpux_uname(struct hpux_utsname *name)
static int hpux_uname(struct hpux_utsname __user *name)
{
int error;
@ -300,14 +300,14 @@ static int hpux_uname(struct hpux_utsname *name)
/* Note: HP-UX just uses the old suser() function to check perms
* in this system call. We'll use capable(CAP_SYS_ADMIN).
*/
int hpux_utssys(char *ubuf, int n, int type)
int hpux_utssys(char __user *ubuf, int n, int type)
{
int len;
int error;
switch( type ) {
case 0:
/* uname(): */
return( hpux_uname( (struct hpux_utsname *)ubuf ) );
return hpux_uname((struct hpux_utsname __user *)ubuf);
break ;
case 1:
/* Obsolete (used to be umask().) */
@ -315,8 +315,9 @@ int hpux_utssys(char *ubuf, int n, int type)
break ;
case 2:
/* ustat(): */
return( hpux_ustat(new_decode_dev(n), (struct hpux_ustat *)ubuf) );
break ;
return hpux_ustat(new_decode_dev(n),
(struct hpux_ustat __user *)ubuf);
break;
case 3:
/* setuname():
*
@ -332,7 +333,7 @@ int hpux_utssys(char *ubuf, int n, int type)
return -EINVAL ;
/* Unlike Linux, HP-UX truncates it if n is too big: */
len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
return( sys_sethostname(ubuf, len) );
return sys_sethostname(ubuf, len);
break ;
case 4:
/* sethostname():
@ -346,7 +347,7 @@ int hpux_utssys(char *ubuf, int n, int type)
return -EINVAL ;
/* Unlike Linux, HP-UX truncates it if n is too big: */
len = (n <= __NEW_UTS_LEN) ? n : __NEW_UTS_LEN ;
return( sys_sethostname(ubuf, len) );
return sys_sethostname(ubuf, len);
break ;
case 5:
/* gethostname():
@ -356,7 +357,7 @@ int hpux_utssys(char *ubuf, int n, int type)
/* Unlike Linux, HP-UX returns an error if n==0: */
if ( n <= 0 )
return -EINVAL ;
return( sys_gethostname(ubuf, n) );
return sys_gethostname(ubuf, n);
break ;
case 6:
/* Supposedly called from setuname() in libc.
@ -420,7 +421,7 @@ int hpux_utssys(char *ubuf, int n, int type)
}
}
int hpux_getdomainname(char *name, int len)
int hpux_getdomainname(char __user *name, int len)
{
int nlen;
int err = -EFAULT;
@ -471,17 +472,18 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
printk(KERN_DEBUG "hpux_sysfs called with arg1='%lx'\n", arg1);
if ( opcode == 1 ) { /* GETFSIND */
len = strlen_user((char *)arg1);
char __user *user_fsname = (char __user *)arg1;
len = strlen_user(user_fsname);
printk(KERN_DEBUG "len of arg1 = %d\n", len);
if (len == 0)
return 0;
fsname = kmalloc(len, GFP_KERNEL);
if ( !fsname ) {
if (!fsname) {
printk(KERN_DEBUG "failed to kmalloc fsname\n");
return 0;
}
if ( copy_from_user(fsname, (char *)arg1, len) ) {
if (copy_from_user(fsname, user_fsname, len)) {
printk(KERN_DEBUG "failed to copy_from_user fsname\n");
kfree(fsname);
return 0;
@ -495,7 +497,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
fstype = 0;
} else {
fstype = 0;
};
}
kfree(fsname);
@ -509,7 +511,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2)
/* Table of syscall names and handle for unimplemented routines */
static const char *syscall_names[] = {
static const char * const syscall_names[] = {
"nosys", /* 0 */
"exit",
"fork",

View File

@ -20,19 +20,16 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
#warning PA64 support needs more work...did first cut
#endif
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/signal.h>
#include <linux/linkage.h>
#ifdef __LP64__
.level 2.0w
#else
.level 1.1
#endif
.level LEVEL
.text
/* These should probably go in a header file somewhere.
@ -41,7 +38,7 @@
* register save/restore macros.
*/
.macro reg_save regs
#ifdef __LP64__
#ifdef CONFIG_64BIT
#warning NEEDS WORK for 64-bit
#endif
STREG %r3, PT_GR3(\regs)
@ -82,11 +79,9 @@
.endm
.export hpux_fork_wrapper
.export hpux_child_return
.import sys_fork
hpux_fork_wrapper:
ENTRY(hpux_fork_wrapper)
ldo TASK_REGS-TASK_SZ_ALGN-64(%r30),%r1 ;! get pt regs
;! pointer in task
reg_save %r1
@ -128,27 +123,26 @@ fork_return:
fork_exit:
bv %r0(%r2)
nop
ENDPROC(hpux_fork_wrapper)
/* Set the return value for the child */
hpux_child_return:
ENTRY(hpux_child_return)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
bl schedule_tail, %r2
nop
bl,n schedule_tail, %r2
#endif
LDREG TASK_PT_GR19-TASK_SZ_ALGN-128(%r30),%r2
b fork_return
copy %r0,%r28
ENDPROC(hpux_child_return)
.export hpux_execve_wrapper
.export hpux_execv_wrapper
.import hpux_execve
hpux_execv_wrapper:
ENTRY(hpux_execv_wrapper)
copy %r0,%r24 /* NULL environment */
hpux_execve_wrapper:
ENTRY(hpux_execve_wrapper)
ldo TASK_REGS-TASK_SZ_ALGN-64(%r30),%r1 ;! get pt regs
@ -187,13 +181,13 @@ hpux_execve_wrapper:
exec_error:
bv %r0(%r19)
nop
ENDPROC(hpux_execv_wrapper)
.export hpux_pipe_wrapper
.import hpux_pipe
/* HP-UX expects pipefd's returned in r28 & r29 */
hpux_pipe_wrapper:
ENTRY(hpux_pipe_wrapper)
STREG %r2,-20(%r30)
ldo 64(%r30),%r30
bl hpux_pipe,%r2
@ -212,12 +206,11 @@ hpux_pipe_wrapper:
pipe_exit:
bv %r0(%r2)
ldo -64(%r30),%r30
ENDPROC(hpux_pipe_wrapper)
.export hpux_syscall_exit
.import syscall_exit
hpux_syscall_exit:
ENTRY(hpux_syscall_exit)
/*
*
* HP-UX call return conventions:
@ -246,12 +239,12 @@ hpux_syscall_exit:
ldo 1(%r0),%r22
no_error:
b syscall_exit
nop
b,n syscall_exit
ENDPROC(hpux_syscall_exit)
.export hpux_unimplemented_wrapper
.import hpux_unimplemented
hpux_unimplemented_wrapper:
ENTRY(hpux_unimplemented_wrapper)
b hpux_unimplemented
STREG %r22,-64(%r30) /* overwrite arg8 with syscall number */
ENDPROC(hpux_unimplemented_wrapper)

View File

@ -44,7 +44,7 @@
#define BLANK() asm volatile("\n->" : : )
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64

View File

@ -68,16 +68,6 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
/* flushes EVERYTHING (tlb & cache) */
void
flush_all_caches(void)
{
flush_cache_all();
flush_tlb_all();
}
EXPORT_SYMBOL(flush_all_caches);
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
@ -99,7 +89,7 @@ show_cache_info(struct seq_file *m)
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
if (cache_info.dc_loop == 1)
if (cache_info.dc_loop != 1)
snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
cache_info.dc_size/1024,
@ -270,6 +260,83 @@ void disable_sr_hashing(void)
panic("SpaceID hashing is still on!\n");
}
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
pte_t pte;
if(pgd_none(*pgd))
return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
return 0;
/* We cannot take the pte lock here: flush_cache_page is usually
* called with pte lock already held. Whereas flush_dcache_page
* takes flush_dcache_mmap_lock, which is lower in the hierarchy:
* the vma itself is secure, but the pte might come or go racily.
*/
pte = *pte_offset_map(pmd, addr);
/* But pte_unmap() does nothing on this architecture */
/* Filter out coincidental file entries and swap entries */
if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
return 0;
return pte_pfn(pte) == pfn;
}
/* Private function to flush a page from the cache of a non-current
* process. cr25 contains the Page Directory of the current user
* process; we're going to hijack both it and the user space %sr3 to
* temporarily make the non-current process current. We have to do
* this because cache flushing may cause a non-access tlb miss which
* the handlers have to fill in from the pgd of the non-current
* process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
unsigned long vmaddr)
{
/* save the current process space and pgd */
unsigned long space = mfsp(3), pgd = mfctl(25);
/* we don't mind taking interrups since they may not
* do anything with user space, but we can't
* be preempted here */
preempt_disable();
/* make us current */
mtctl(__pa(vma->vm_mm->pgd), 25);
mtsp(vma->vm_mm->context, 3);
flush_user_dcache_page(vmaddr);
if(vma->vm_flags & VM_EXEC)
flush_user_icache_page(vmaddr);
/* put the old current process back */
mtsp(space, 3);
mtctl(pgd, 25);
preempt_enable();
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
if (likely(vma->vm_mm->context == mfsp(3))) {
flush_user_dcache_page(vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_page(vmaddr);
} else {
flush_user_cache_page_non_current(vma, vmaddr);
}
}
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
@ -342,7 +409,7 @@ void clear_user_page_asm(void *page, unsigned long vaddr)
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
void parisc_setup_cache_timing(void)
void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
@ -366,6 +433,9 @@ void parisc_setup_cache_timing(void)
if (!parisc_cache_flush_threshold)
parisc_cache_flush_threshold = FLUSH_THRESHOLD;
if (parisc_cache_flush_threshold > cache_info.dc_size)
parisc_cache_flush_threshold = cache_info.dc_size;
printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}
@ -410,3 +480,97 @@ void kunmap_parisc(void *addr)
}
EXPORT_SYMBOL(kunmap_parisc);
#endif
void __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
unsigned long npages;
npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
mtsp(sid, 1);
purge_tlb_start();
if (split_tlb) {
while (npages--) {
pdtlb(start);
pitlb(start);
start += PAGE_SIZE;
}
} else {
while (npages--) {
pdtlb(start);
start += PAGE_SIZE;
}
}
purge_tlb_end();
}
}
static void cacheflush_h_tmp_function(void *dummy)
{
flush_cache_all_local();
}
void flush_cache_all(void)
{
on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}
void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
flush_cache_all();
#else
flush_cache_all_local();
#endif
}
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
if ((end - start) < parisc_cache_flush_threshold)
flush_user_dcache_range_asm(start,end);
else
flush_data_cache();
}
void
flush_user_icache_range(unsigned long start, unsigned long end)
{
if ((end - start) < parisc_cache_flush_threshold)
flush_user_icache_range_asm(start,end);
else
flush_instruction_cache();
}
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int sr3;
if (!vma->vm_mm->context) {
BUG();
return;
}
sr3 = mfsp(3);
if (vma->vm_mm->context == sr3) {
flush_user_dcache_range(start,end);
flush_user_icache_range(start,end);
} else {
flush_cache_all();
}
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
BUG_ON(!vma->vm_mm->context);
if (likely(translation_exists(vma, vmaddr, pfn)))
__flush_cache_page(vma, vmaddr);
}

View File

@ -562,12 +562,23 @@ pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
pa_dev_attr_id(hversion, "0x%03x\n");
pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct parisc_device *padev = to_parisc_device(dev);
struct parisc_device_id *id = &padev->id;
return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
(u32)id->sversion);
}
static struct device_attribute parisc_device_attrs[] = {
__ATTR_RO(irq),
__ATTR_RO(hw_type),
__ATTR_RO(rev),
__ATTR_RO(hversion),
__ATTR_RO(sversion),
__ATTR_RO(modalias),
__ATTR_NULL,
};
@ -689,7 +700,9 @@ parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
.fn = check_parent,
};
device_for_each_child(parent, &recurse_data, descend_children);
if (device_for_each_child(parent, &recurse_data, descend_children))
/* nothing */;
return d.dev;
}
@ -835,8 +848,8 @@ static void print_parisc_device(struct parisc_device *dev)
static int count;
print_pa_hwpath(dev, hw_path);
printk(KERN_INFO "%d. %s at 0x%lx [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, dev->hpa.start, hw_path, dev->id.hw_type,
printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
if (dev->num_addrs) {
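The modalias_show() hunk above assembles the modalias string from the PA-RISC device ID fields. As an illustration only, a self-contained user-space sketch using the same format string with made-up ID values (the values are hypothetical; the format is the one visible in the diff):

#include <stdio.h>

int main(void)
{
	/* Hypothetical ID values, purely for illustration. */
	unsigned int hw_type = 0x05, hversion = 0x0481;
	unsigned int hversion_rev = 0x00, sversion = 0x00077;

	/* Same format as modalias_show() above; prints e.g.
	 * "parisc:t05hv0481rev00sv00000077". */
	printf("parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
	       hw_type, hversion, hversion_rev, sversion);
	return 0;
}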

View File

@ -37,6 +37,8 @@
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
#define CMPIB cmpib,*
#define CMPB cmpb,*
@ -648,13 +650,11 @@
* the static part of the kernel address space.
*/
.export fault_vector_20
.text
.align 4096
fault_vector_20:
ENTRY(fault_vector_20)
/* First vector is invalid (0) */
.ascii "cows can fly"
.byte 0
@ -695,14 +695,13 @@ fault_vector_20:
def 29
def 30
def 31
END(fault_vector_20)
#ifndef CONFIG_64BIT
.export fault_vector_11
.align 2048
fault_vector_11:
ENTRY(fault_vector_11)
/* First vector is invalid (0) */
.ascii "cows can fly"
.byte 0
@ -743,6 +742,7 @@ fault_vector_11:
def 29
def 30
def 31
END(fault_vector_11)
#endif
@ -762,9 +762,8 @@ fault_vector_11:
#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000
.export __kernel_thread, code
.import do_fork
__kernel_thread:
ENTRY(__kernel_thread)
STREG %r2, -RP_OFFSET(%r30)
copy %r30, %r1
@ -797,6 +796,7 @@ __kernel_thread:
ldo -PT_SZ_ALGN(%r30), %r30
bv %r0(%r2)
nop
ENDPROC(__kernel_thread)
/*
* Child Returns here
@ -805,8 +805,7 @@ __kernel_thread:
* into task save area.
*/
.export ret_from_kernel_thread
ret_from_kernel_thread:
ENTRY(ret_from_kernel_thread)
/* Call schedule_tail first though */
BL schedule_tail, %r2
@ -833,10 +832,10 @@ ret_from_kernel_thread:
bv %r0(%r1)
#endif
ldi 0, %r26
ENDPROC(ret_from_kernel_thread)
.import sys_execve, code
.export __execve, code
__execve:
ENTRY(__execve)
copy %r2, %r15
copy %r30, %r16
ldo PT_SZ_ALGN(%r30), %r30
@ -856,16 +855,15 @@ __execve:
copy %r16, %r30
bv %r0(%r2)
nop
ENDPROC(__execve)
.align 4
/*
* struct task_struct *_switch_to(struct task_struct *prev,
* struct task_struct *next)
*
* switch kernel stacks and return prev */
.export _switch_to, code
_switch_to:
ENTRY(_switch_to)
STREG %r2, -RP_OFFSET(%r30)
callee_save_float
@ -890,6 +888,7 @@ _switch_to_ret:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
ENDPROC(_switch_to)
/*
* Common rfi return path for interruptions, kernel execve, and
@ -907,8 +906,7 @@ _switch_to_ret:
.align 4096
.export syscall_exit_rfi
syscall_exit_rfi:
ENTRY(syscall_exit_rfi)
mfctl %cr30,%r16
LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
ldo TASK_REGS(%r16),%r16
@ -978,11 +976,36 @@ intr_check_resched:
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
.import do_notify_resume,code
intr_check_sig:
/* As above */
mfctl %cr30,%r1
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
LDREG TI_FLAGS(%r1),%r19
ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20
and,COND(<>) %r19, %r20, %r0
b,n intr_restore /* skip past if we've nothing to do */
/* This check is critical to having LWS
* working. The IASQ is zero on the gateway
* page and we cannot deliver any signals until
* we get off the gateway page.
*
* Only do signals if we are returning to user space
*/
LDREG PT_IASQ0(%r16), %r20
CMPIB=,n 0,%r20,intr_restore /* backward */
LDREG PT_IASQ1(%r16), %r20
CMPIB=,n 0,%r20,intr_restore /* backward */
copy %r0, %r25 /* long in_syscall = 0 */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_notify_resume,%r2
copy %r16, %r26 /* struct pt_regs *regs */
b,n intr_check_sig
intr_restore:
copy %r16,%r29
@ -1072,35 +1095,6 @@ intr_do_preempt:
b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */
.import do_signal,code
intr_do_signal:
/*
This check is critical to having LWS
working. The IASQ is zero on the gateway
page and we cannot deliver any signals until
we get off the gateway page.
Only do signals if we are returning to user space
*/
LDREG PT_IASQ0(%r16), %r20
CMPIB= 0,%r20,intr_restore /* backward */
nop
LDREG PT_IASQ1(%r16), %r20
CMPIB= 0,%r20,intr_restore /* backward */
nop
copy %r0, %r24 /* unsigned long in_syscall */
copy %r16, %r25 /* struct pt_regs *regs */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
b intr_check_sig
nop
/*
* External interrupts.
*/
@ -1115,11 +1109,7 @@ intr_extint:
mfctl %cr31,%r1
copy %r30,%r17
/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
#ifdef CONFIG_64BIT
depdi 0,63,15,%r17
#else
depi 0,31,15,%r17
#endif
DEPI 0,31,15,%r17
CMPB=,n %r1,%r17,2f
get_stack_use_cr31
b,n 3f
@ -1148,13 +1138,12 @@ intr_extint:
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
ENDPROC(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
.export intr_save, code /* for os_hpmc */
intr_save:
ENTRY(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
CMPIB=,n 0,%r16,1f
get_stack_use_cr30
@ -1229,6 +1218,7 @@ skip_save_ior:
b handle_interruption
ldo R%intr_check_sig(%r2), %r2
ENDPROC(intr_save)
/*
@ -1814,9 +1804,7 @@ dtlb_fault:
LDREG PT_GR18(\regs),%r18
.endm
.export sys_fork_wrapper
.export child_return
sys_fork_wrapper:
ENTRY(sys_fork_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_save %r1
@ -1853,9 +1841,10 @@ wrapper_exit:
ldi __NR_fork,%r20
bv %r0(%r2)
STREG %r20,PT_GR20(%r1)
ENDPROC(sys_fork_wrapper)
/* Set the return value for the child */
child_return:
ENTRY(child_return)
BL schedule_tail, %r2
nop
@ -1863,10 +1852,10 @@ child_return:
LDREG TASK_PT_GR19(%r1),%r2
b wrapper_exit
copy %r0,%r28
ENDPROC(child_return)
.export sys_clone_wrapper
sys_clone_wrapper:
ENTRY(sys_clone_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1),%r1 /* get pt regs */
reg_save %r1
@ -1887,9 +1876,10 @@ sys_clone_wrapper:
b wrapper_exit
LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_clone_wrapper)
.export sys_vfork_wrapper
sys_vfork_wrapper:
ENTRY(sys_vfork_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1),%r1 /* get pt regs */
reg_save %r1
@ -1910,6 +1900,7 @@ sys_vfork_wrapper:
b wrapper_exit
LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
ENDPROC(sys_vfork_wrapper)
.macro execve_wrapper execve
@ -1946,22 +1937,19 @@ error_\execve:
nop
.endm
.export sys_execve_wrapper
.import sys_execve
sys_execve_wrapper:
ENTRY(sys_execve_wrapper)
execve_wrapper sys_execve
ENDPROC(sys_execve_wrapper)
#ifdef CONFIG_64BIT
.export sys32_execve_wrapper
.import sys32_execve
sys32_execve_wrapper:
ENTRY(sys32_execve_wrapper)
execve_wrapper sys32_execve
ENDPROC(sys32_execve_wrapper)
#endif
.export sys_rt_sigreturn_wrapper
sys_rt_sigreturn_wrapper:
ENTRY(sys_rt_sigreturn_wrapper)
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
@ -1989,9 +1977,9 @@ sys_rt_sigreturn_wrapper:
*/
bv %r0(%r2)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
ENDPROC(sys_rt_sigreturn_wrapper)
.export sys_sigaltstack_wrapper
sys_sigaltstack_wrapper:
ENTRY(sys_sigaltstack_wrapper)
/* Get the user stack pointer */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1),%r24 /* get pt regs */
@ -1999,10 +1987,10 @@ sys_sigaltstack_wrapper:
STREG %r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
b,l do_sigaltstack,%r2
BL do_sigaltstack,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
bl do_sigaltstack,%r2
BL do_sigaltstack,%r2
ldo FRAME_SIZE(%r30), %r30
#endif
@ -2010,53 +1998,26 @@ sys_sigaltstack_wrapper:
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
nop
ENDPROC(sys_sigaltstack_wrapper)
#ifdef CONFIG_64BIT
.export sys32_sigaltstack_wrapper
sys32_sigaltstack_wrapper:
ENTRY(sys32_sigaltstack_wrapper)
/* Get the user stack pointer */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
LDREG TASK_PT_GR30(%r24),%r24
STREG %r2, -RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30), %r30
b,l do_sigaltstack32,%r2
BL do_sigaltstack32,%r2
ldo -16(%r30),%r29 /* Reference param save area */
ldo -FRAME_SIZE(%r30), %r30
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif
.export sys_rt_sigsuspend_wrapper
sys_rt_sigsuspend_wrapper:
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r24
reg_save %r24
STREG %r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
b,l sys_rt_sigsuspend,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
bl sys_rt_sigsuspend,%r2
ldo FRAME_SIZE(%r30), %r30
#endif
ldo -FRAME_SIZE(%r30), %r30
LDREG -RP_OFFSET(%r30), %r2
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_restore %r1
bv %r0(%r2)
nop
.export syscall_exit
syscall_exit:
ENTRY(syscall_exit)
/* NOTE: HP-UX syscalls also come through here
* after hpux_syscall_exit fixes up return
* values. */
@ -2119,9 +2080,35 @@ syscall_check_resched:
LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
.import do_signal,code
syscall_check_sig:
LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
and,COND(<>) %r19, %r26, %r0
b,n syscall_restore /* skip past if we've nothing to do */
syscall_do_signal:
/* Save callee-save registers (for sigcontext).
* FIXME: After this point the process structure should be
* consistent with all the relevant state of the process
* before the syscall. We need to verify this.
*/
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
reg_save %r26
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_notify_resume,%r2
ldi 1, %r25 /* long in_syscall = 1 */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
reg_restore %r20
b,n syscall_check_sig
syscall_restore:
/* Are we being ptraced? */
@ -2259,31 +2246,10 @@ syscall_do_resched:
#endif
b syscall_check_bh /* if resched, we start over again */
nop
ENDPROC(syscall_exit)
.import do_signal,code
syscall_do_signal:
/* Save callee-save registers (for sigcontext).
FIXME: After this point the process structure should be
consistent with all the relevant state of the process
before the syscall. We need to verify this. */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
reg_save %r25
ldi 1, %r24 /* unsigned long in_syscall */
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
BL do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
reg_restore %r20
b,n syscall_check_sig
get_register:
/*
* get_register is used by the non access tlb miss handlers to
* copy the value of the general register specified in r8 into
@ -2294,8 +2260,6 @@ syscall_do_signal:
* a -1 in it, but that is OK, it just means that we will have
* to use the slow path instead).
*/
get_register:
blr %r8,%r0
nop
bv %r0(%r25) /* r0 */
@ -2363,13 +2327,13 @@ get_register:
bv %r0(%r25) /* r31 */
copy %r31,%r1
set_register:
/*
* set_register is used by the non access tlb miss handlers to
* copy the value of r1 into the general register specified in
* r8.
*/
set_register:
blr %r8,%r0
nop
bv %r0(%r25) /* r0 (silly, but it is a place holder) */
@ -2436,3 +2400,4 @@ set_register:
copy %r1,%r30
bv %r0(%r25) /* r31 */
copy %r1,%r31

View File

@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(pdc_lock);
static unsigned long pdc_result[32] __attribute__ ((aligned (8)));
static unsigned long pdc_result2[32] __attribute__ ((aligned (8)));
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define WIDE_FIRMWARE 0x1
#define NARROW_FIRMWARE 0x2
@ -94,12 +94,12 @@ int parisc_narrow_firmware __read_mostly = 1;
* when running a 64-bit kernel on such boxes (e.g. C200 or C360).
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
long real64_call(unsigned long function, ...);
#endif
long real32_call(unsigned long function, ...);
#ifdef __LP64__
#ifdef CONFIG_64BIT
# define MEM_PDC (unsigned long)(PAGE0->mem_pdc_hi) << 32 | PAGE0->mem_pdc
# define mem_pdc_call(args...) unlikely(parisc_narrow_firmware) ? real32_call(MEM_PDC, args) : real64_call(MEM_PDC, args)
#else
@ -117,7 +117,7 @@ long real32_call(unsigned long function, ...);
*/
static unsigned long f_extend(unsigned long address)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
if(unlikely(parisc_narrow_firmware)) {
if((address & 0xff000000) == 0xf0000000)
return 0xf0f0f0f000000000UL | (u32)address;
@ -139,7 +139,7 @@ static unsigned long f_extend(unsigned long address)
*/
static void convert_to_wide(unsigned long *addr)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
int i;
unsigned int *p = (unsigned int *)addr;
@ -158,7 +158,7 @@ static void convert_to_wide(unsigned long *addr)
*/
void __init set_firmware_width(void)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
int retval;
unsigned long flags;
@ -238,7 +238,7 @@ int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_inf
*
* Must be correctly formatted or expect system crash
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
{
int retval = 0;
@ -949,7 +949,7 @@ int pdc_tod_set(unsigned long sec, unsigned long usec)
}
EXPORT_SYMBOL(pdc_tod_set);
#ifdef __LP64__
#ifdef CONFIG_64BIT
int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
struct pdc_memory_table *tbl, unsigned long entries)
{
@ -965,7 +965,7 @@ int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
return retval;
}
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
/* FIXME: Is this pdc used? I could not find type reference to ftc_bitmap
* so I guessed at unsigned long. Someone who knows what this does, can fix
@ -1204,7 +1204,7 @@ int pdc_sti_call(unsigned long func, unsigned long flags,
}
EXPORT_SYMBOL(pdc_sti_call);
#ifdef __LP64__
#ifdef CONFIG_64BIT
/**
* pdc_pat_cell_get_number - Returns the cell number.
* @cell_info: The return buffer.
@ -1387,7 +1387,7 @@ int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
return retval;
}
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
/***************** 32-bit real-mode calls ***********/
@ -1445,7 +1445,7 @@ long real32_call(unsigned long fn, ...)
return real32_call_asm(&real_stack.sp, &real_stack.arg0, fn);
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
/***************** 64-bit real-mode calls ***********/
struct wide_stack {
@ -1496,5 +1496,5 @@ long real64_call(unsigned long fn, ...)
return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
}
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
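As a worked example of the narrow-firmware branch of f_extend() shown earlier in this file: when parisc_narrow_firmware is set, a 32-bit PDC address in the 0xf0000000 range is widened by prefixing 0xf0f0f0f0. A user-space sketch of just that branch (helper name and test value are illustrative; the remaining branches of f_extend() are not visible in the hunk and are elided):

#include <stdio.h>

/* Mirrors only the visible narrow-firmware case of f_extend(). */
static unsigned long long extend_narrow(unsigned int address)
{
	if ((address & 0xff000000u) == 0xf0000000u)
		return 0xf0f0f0f000000000ULL | address;
	return address;	/* other cases elided */
}

int main(void)
{
	/* 0xf0101234 is a made-up example address. */
	printf("0x%llx\n", extend_narrow(0xf0101234u));	/* 0xf0f0f0f0f0101234 */
	return 0;
}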

View File

@ -2,7 +2,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Helge Deller
* Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
* Copyright 1999 SuSE GmbH (Philipp Rumpf)
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
@ -19,16 +19,17 @@
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <linux/linkage.h>
.level LEVEL
.data
.export boot_args
boot_args:
ENTRY(boot_args)
.word 0 /* arg0 */
.word 0 /* arg1 */
.word 0 /* arg2 */
.word 0 /* arg3 */
END(boot_args)
.text
.align 4
@ -38,10 +39,9 @@ boot_args:
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
.export stext
.export _stext,data /* Kernel want it this way! */
_stext:
stext:
ENTRY(stext)
.proc
.callinfo
@ -343,6 +343,9 @@ smp_slave_stext:
.procend
#endif /* CONFIG_SMP */
ENDPROC(stext)
#ifndef CONFIG_64BIT
.data

View File

@ -46,6 +46,8 @@
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <linux/linkage.h>
/*
* stack for os_hpmc, the HPMC handler.
* buffer for IODC procedures (for the HPMC handler).
@ -69,17 +71,15 @@ hpmc_raddr:
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
.export hpmc_pim_data, data
.align 8
hpmc_pim_data:
ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
END(hpmc_pim_data)
.text
.export os_hpmc, code
.import intr_save, code
os_hpmc:
ENTRY(os_hpmc)
/*
* registers modified:
@ -294,11 +294,9 @@ os_hpmc_6:
b .
nop
ENDPROC(os_hpmc)
/* this label used to compute os_hpmc checksum */
.export os_hpmc_end, code
os_hpmc_end:
ENTRY(os_hpmc_end)
nop

View File

@ -47,7 +47,7 @@ void __init setup_pdc(void)
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
struct pdc_model model;
#ifdef __LP64__
#ifdef CONFIG_64BIT
struct pdc_pat_cell_num cell_info;
#endif
@ -73,7 +73,7 @@ void __init setup_pdc(void)
* clearer message.
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_PAT;
@ -152,7 +152,7 @@ static void __init pagezero_memconfig(void)
npmem_ranges = 1;
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* All of the PDC PAT specific code is 64-bit only */
@ -408,13 +408,13 @@ static void __init sprockets_memconfig(void)
}
}
#else /* !__LP64__ */
#else /* !CONFIG_64BIT */
#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()
#endif /* !__LP64__ */
#endif /* !CONFIG_64BIT */
#ifndef CONFIG_PA20

View File

@ -336,11 +336,7 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
static inline int eirr_to_irq(unsigned long eirr)
{
#ifdef CONFIG_64BIT
int bit = fls64(eirr);
#else
int bit = fls(eirr);
#endif
int bit = fls_long(eirr);
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
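The hunk above replaces the open-coded fls()/fls64() #ifdef with fls_long(), which picks the right variant based on BITS_PER_LONG. A rough user-space approximation, for illustration only (fls semantics: 1-based index of the most significant set bit, 0 when no bit is set):

#include <stdio.h>

/* Approximation of the kernel's fls_long(); not the kernel implementation. */
static int fls_long_approx(unsigned long x)
{
	if (x == 0)
		return 0;
	return (int)(sizeof(unsigned long) * 8) - __builtin_clzl(x);
}

int main(void)
{
	/* Bit 5 set -> fls_long() == 6, so eirr_to_irq() above would map
	 * this EIRR value to (BITS_PER_LONG - 6) + TIMER_IRQ. */
	printf("%d\n", fls_long_approx(1UL << 5));
	return 0;
}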

View File

@ -46,6 +46,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <asm/unwind.h>
@ -96,7 +97,7 @@ static inline int in_local_section(struct module *me, void *loc, void *dot)
}
#ifndef __LP64__
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
};
@ -176,7 +177,7 @@ void *module_alloc(unsigned long size)
return vmalloc(size);
}
#ifndef __LP64__
#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
return 0;
@ -319,7 +320,7 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
return 0;
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
{
unsigned int i;
@ -342,9 +343,9 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
value);
return i * sizeof(struct got_entry);
}
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
#ifdef __LP64__
#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
@ -368,7 +369,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
return (Elf_Addr)fdesc;
}
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
enum elf_stub_type {
ELF_STUB_GOT,
@ -394,7 +395,7 @@ static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
i * sizeof(struct stub_entry);
}
#ifndef __LP64__
#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
* ldil L'XXX,%r1
* be,n R'XXX(%sr4,%r1)
@ -472,7 +473,7 @@ int apply_relocate(Elf_Shdr *sechdrs,
return -ENOEXEC;
}
#ifndef __LP64__
#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@ -822,7 +823,8 @@ int module_finalize(const Elf_Ehdr *hdr,
me->name, strtab, symhdr);
if(me->arch.got_count > MAX_GOTS) {
printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d\n", me->name, me->arch.got_count, MAX_GOTS);
printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
me->name, me->arch.got_count, MAX_GOTS);
return -EINVAL;
}
@ -850,10 +852,11 @@ int module_finalize(const Elf_Ehdr *hdr,
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
return 0;
return module_bug_finalize(hdr, sechdrs, me);
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
module_bug_cleanup(mod);
}

View File

@ -27,31 +27,21 @@
*/
#ifdef CONFIG_64BIT
#define ADDIB addib,*
#define CMPB cmpb,*
#define ANDCM andcm,*
.level 2.0w
#else
#define ADDIB addib,
#define CMPB cmpb,
#define ANDCM andcm
.level 2.0
#endif
#include <asm/psw.h>
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <linux/linkage.h>
.text
.align 128
.export flush_tlb_all_local,code
flush_tlb_all_local:
ENTRY(flush_tlb_all_local)
.proc
.callinfo NO_CALLS
.entry
@ -200,11 +190,11 @@ fdtdone:
.exit
.procend
ENDPROC(flush_tlb_all_local)
.export flush_instruction_cache_local,code
.import cache_info,data
flush_instruction_cache_local:
ENTRY(flush_instruction_cache_local)
.proc
.callinfo NO_CALLS
.entry
@ -241,11 +231,11 @@ fisync:
.exit
.procend
ENDPROC(flush_instruction_cache_local)
.export flush_data_cache_local, code
.import cache_info, data
flush_data_cache_local:
ENTRY(flush_data_cache_local)
.proc
.callinfo NO_CALLS
.entry
@ -283,11 +273,11 @@ fdsync:
.exit
.procend
ENDPROC(flush_data_cache_local)
.export copy_user_page_asm,code
.align 16
copy_user_page_asm:
ENTRY(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
@ -409,6 +399,7 @@ copy_user_page_asm:
.exit
.procend
ENDPROC(copy_user_page_asm)
/*
* NOTE: Code in clear_user_page has a hard coded dependency on the
@ -446,9 +437,7 @@ copy_user_page_asm:
* lobby for such a change.
*/
.export copy_user_page_asm,code
copy_user_page_asm:
ENTRY(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
@ -534,11 +523,10 @@ copy_user_page_asm:
.exit
.procend
ENDPROC(copy_user_page_asm)
#endif
.export __clear_user_page_asm,code
__clear_user_page_asm:
ENTRY(__clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
@ -618,10 +606,9 @@ __clear_user_page_asm:
.exit
.procend
ENDPROC(__clear_user_page_asm)
.export flush_kernel_dcache_page_asm
flush_kernel_dcache_page_asm:
ENTRY(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@ -662,10 +649,9 @@ flush_kernel_dcache_page_asm:
.exit
.procend
ENDPROC(flush_kernel_dcache_page_asm)
.export flush_user_dcache_page
flush_user_dcache_page:
ENTRY(flush_user_dcache_page)
.proc
.callinfo NO_CALLS
.entry
@ -706,10 +692,9 @@ flush_user_dcache_page:
.exit
.procend
ENDPROC(flush_user_dcache_page)
.export flush_user_icache_page
flush_user_icache_page:
ENTRY(flush_user_icache_page)
.proc
.callinfo NO_CALLS
.entry
@ -750,11 +735,10 @@ flush_user_icache_page:
.exit
.procend
ENDPROC(flush_user_icache_page)
.export purge_kernel_dcache_page
purge_kernel_dcache_page:
ENTRY(purge_kernel_dcache_page)
.proc
.callinfo NO_CALLS
.entry
@ -794,15 +778,14 @@ purge_kernel_dcache_page:
.exit
.procend
ENDPROC(purge_kernel_dcache_page)
#if 0
/* Currently not used, but it still is a possible alternate
* solution.
*/
.export flush_alias_page
flush_alias_page:
ENTRY(flush_alias_page)
.proc
.callinfo NO_CALLS
.entry
@ -882,10 +865,9 @@ flush_user_dcache_range_asm:
.exit
.procend
ENDPROC(flush_alias_page)
.export flush_kernel_dcache_range_asm
flush_kernel_dcache_range_asm:
ENTRY(flush_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@ -905,10 +887,9 @@ flush_kernel_dcache_range_asm:
.exit
.procend
ENDPROC(flush_kernel_dcache_range_asm)
.export flush_user_icache_range_asm
flush_user_icache_range_asm:
ENTRY(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@ -927,10 +908,9 @@ flush_user_icache_range_asm:
.exit
.procend
ENDPROC(flush_user_icache_range_asm)
.export flush_kernel_icache_page
flush_kernel_icache_page:
ENTRY(flush_kernel_icache_page)
.proc
.callinfo NO_CALLS
.entry
@ -971,10 +951,9 @@ flush_kernel_icache_page:
.exit
.procend
ENDPROC(flush_kernel_icache_page)
.export flush_kernel_icache_range_asm
flush_kernel_icache_range_asm:
ENTRY(flush_kernel_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
@ -992,14 +971,13 @@ flush_kernel_icache_range_asm:
nop
.exit
.procend
ENDPROC(flush_kernel_icache_range_asm)
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*/
.align 256
.export disable_sr_hashing_asm,code
disable_sr_hashing_asm:
ENTRY(disable_sr_hashing_asm)
.proc
.callinfo NO_CALLS
.entry
@ -1088,5 +1066,6 @@ srdis_done:
.exit
.procend
ENDPROC(disable_sr_hashing_asm)
.end

View File

@ -7,7 +7,7 @@
* Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
* Copyright (C) 2002-2003 Helge Deller <deller with parisc-linux.org>
* Copyright (C) 2002-2007 Helge Deller <deller with parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -38,7 +38,7 @@ EXPORT_SYMBOL(__cmpxchg_u32);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
#ifdef __LP64__
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(__xchg64);
EXPORT_SYMBOL(__cmpxchg_u64);
#endif
@ -58,7 +58,7 @@ EXPORT_SYMBOL(fixup_get_user_skip_2);
EXPORT_SYMBOL(fixup_put_user_skip_1);
EXPORT_SYMBOL(fixup_put_user_skip_2);
#ifndef __LP64__
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
EXPORT_SYMBOL($global$);
@ -135,7 +135,7 @@ EXPORT_SYMBOL(__muldi3);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
#ifdef __LP64__
#ifdef CONFIG_64BIT
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __umoddi3(void);
@ -147,7 +147,7 @@ EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__moddi3);
#endif
#ifndef __LP64__
#ifndef CONFIG_64BIT
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif

View File

@ -342,7 +342,7 @@ pcxl_dma_init(void)
pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
get_order(pcxl_res_size));
memset(pcxl_res_map, 0, pcxl_res_size);
proc_gsc_root = proc_mkdir("gsc", 0);
proc_gsc_root = proc_mkdir("gsc", NULL);
if (!proc_gsc_root)
printk(KERN_WARNING
"pcxl_dma_init: Unable to create gsc /proc dir entry\n");

View File

@ -200,8 +200,8 @@ static void
pcibios_link_hba_resources( struct resource *hba_res, struct resource *r)
{
if (!r->parent) {
printk(KERN_EMERG "PCI: resource not parented! [%lx-%lx]\n",
r->start, r->end);
printk(KERN_EMERG "PCI: resource not parented! [%p-%p]\n",
(void*) r->start, (void*) r->end);
r->parent = hba_res;
/* reverse link is harder *sigh* */

View File

@ -20,6 +20,7 @@
*/
#include <asm/assembly.h>
#include <linux/linkage.h>
#ifdef CONFIG_64BIT
.level 2.0w
@ -41,10 +42,8 @@
; starting/stopping the coprocessor with the pmenb/pmdis.
;
.text
.align 32
.export perf_intrigue_enable_perf_counters,code
perf_intrigue_enable_perf_counters:
ENTRY(perf_intrigue_enable_perf_counters)
.proc
.callinfo frame=0,NO_CALLS
.entry
@ -69,9 +68,9 @@ perf_intrigue_enable_perf_counters:
nop
.exit
.procend
ENDPROC(perf_intrigue_enable_perf_counters)
.export perf_intrigue_disable_perf_counters,code
perf_intrigue_disable_perf_counters:
ENTRY(perf_intrigue_disable_perf_counters)
.proc
.callinfo frame=0,NO_CALLS
.entry
@ -86,6 +85,7 @@ perf_intrigue_disable_perf_counters:
mtctl %r26,ccr ; turn off performance coprocessor
.exit
.procend
ENDPROC(perf_intrigue_disable_perf_counters)
;***********************************************************************
;*
@ -117,8 +117,7 @@ perf_intrigue_disable_perf_counters:
;*
;***********************************************************************
.export perf_rdr_shift_in_W,code
perf_rdr_shift_in_W:
ENTRY(perf_rdr_shift_in_W)
.proc
.callinfo frame=0,NO_CALLS
.entry
@ -550,6 +549,7 @@ perf_rdr_shift_in_W_leave:
.exit
MTDIAG_2 (24) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_in_W)
;***********************************************************************
@ -575,8 +575,7 @@ perf_rdr_shift_in_W_leave:
;*
;***********************************************************************
.export perf_rdr_shift_out_W,code
perf_rdr_shift_out_W:
ENTRY(perf_rdr_shift_out_W)
.proc
.callinfo frame=0,NO_CALLS
.entry
@ -983,6 +982,7 @@ perf_rdr_shift_out_W_leave:
.exit
MTDIAG_2 (23) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_out_W)
;***********************************************************************
@ -1012,8 +1012,7 @@ perf_rdr_shift_out_W_leave:
;*
;***********************************************************************
.export perf_rdr_shift_in_U,code
perf_rdr_shift_in_U:
ENTRY(perf_rdr_shift_in_U)
.proc
.callinfo frame=0,NO_CALLS
.entry
@ -1343,6 +1342,7 @@ perf_rdr_shift_in_U_leave:
.exit
MTDIAG_2 (24) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_in_U)
;***********************************************************************
;*
@ -1369,8 +1369,7 @@ perf_rdr_shift_in_U_leave:
;*
;***********************************************************************
.export perf_rdr_shift_out_U,code
perf_rdr_shift_out_U:
ENTRY(perf_rdr_shift_out_U)
.proc
.callinfo frame=0,NO_CALLS
.entry
@ -1687,4 +1686,5 @@ perf_rdr_shift_out_U_leave:
.exit
MTDIAG_2 (23) ; restore DR2
.procend
ENDPROC(perf_rdr_shift_out_U)

View File

@ -13,7 +13,7 @@
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2002 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
@ -303,7 +303,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
* Copy function and argument to be called from
* ret_from_kernel_thread.
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
cregs->gr[27] = pregs->gr[27];
#endif
cregs->gr[26] = pregs->gr[26];
@ -355,8 +355,8 @@ asmlinkage int sys_execve(struct pt_regs *regs)
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, (char __user **) regs->gr[25],
(char __user **) regs->gr[24], regs);
error = do_execve(filename, (char __user * __user *) regs->gr[25],
(char __user * __user *) regs->gr[24], regs);
if (error == 0) {
task_lock(current);
current->ptrace &= ~PT_DTRACE;

View File

@ -93,7 +93,7 @@ static int __init processor_probe(struct parisc_device *dev)
cpuid = boot_cpu_data.cpu_count;
txn_addr = dev->hpa.start; /* for legacy PDC */
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
ulong status;
unsigned long bytecnt;
@ -153,8 +153,6 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpuid = cpuid; /* save CPU id */
p->txn_addr = txn_addr; /* save CPU IRQ address */
#ifdef CONFIG_SMP
spin_lock_init(&p->lock);
/*
** FIXME: review if any other initialization is clobbered
** for boot_cpu by the above memset().
@ -311,11 +309,11 @@ int __init init_per_cpu(int cpunum)
} else {
printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef __LP64__
#ifdef CONFIG_64BIT
"Halting Machine - FP required\n"
#endif
, coproc_cfg.ccr_functional);
#ifdef __LP64__
#ifdef CONFIG_64BIT
mdelay(100); /* previous chars get pushed to console */
panic("FP CoProc not reported");
#endif
@ -339,9 +337,6 @@ show_cpuinfo (struct seq_file *m, void *v)
#ifdef CONFIG_SMP
if (0 == cpu_data[n].hpa)
continue;
#ifdef ENTRY_SYS_CPUS
#error iCOD support wants to show CPU state here
#endif
#endif
seq_printf(m, "processor\t: %d\n"
"cpu family\t: PA-RISC %s\n",

View File

@ -36,7 +36,7 @@
#define DBG(x...)
#endif
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* This function is needed to translate 32 bit pt_regs offsets in to
* 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
@ -90,7 +90,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKDATA: {
int copied;
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (__is_compat_task(child)) {
unsigned int tmp;
@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = 0;
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (__is_compat_task(child)) {
unsigned int tmp = (unsigned int)data;
DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n",
@ -145,7 +145,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR: {
ret = -EIO;
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (__is_compat_task(child)) {
unsigned int tmp;
@ -204,7 +204,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = 0;
goto out_tsk;
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (__is_compat_task(child)) {
if (addr & (sizeof(int)-1))
goto out_tsk;

View File

@ -11,6 +11,8 @@
#include <asm/psw.h>
#include <asm/assembly.h>
#include <linux/linkage.h>
.section .bss
.export real_stack
.export real32_stack
@ -39,8 +41,6 @@ save_cr_end:
.text
.export real32_call_asm
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
* unsigned int iodc_fn)
@ -49,7 +49,7 @@ save_cr_end:
* iodc_fn is the IODC function to call
*/
real32_call_asm:
ENTRY(real32_call_asm)
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef CONFIG_64BIT
callee_save
@ -107,6 +107,7 @@ ric_ret:
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
ENDPROC(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
@ -218,7 +219,6 @@ rfi_r2v_1:
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
.text
.export real64_call_asm
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
@ -226,7 +226,7 @@ rfi_r2v_1:
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
real64_call_asm:
ENTRY(real64_call_asm)
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
@ -272,19 +272,21 @@ r64_ret:
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
ENDPROC(real64_call_asm)
#endif
.export __canonicalize_funcptr_for_compare
.text
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
__canonicalize_funcptr_for_compare:
ENTRY(__canonicalize_funcptr_for_compare)
#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28
ENDPROC(__canonicalize_funcptr_for_compare)

View File

@ -120,13 +120,13 @@ extern void collect_boot_cpu_data(void);
void __init setup_arch(char **cmdline_p)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
extern int parisc_narrow_firmware;
#endif
init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
#ifdef __LP64__
#ifdef CONFIG_64BIT
printk(KERN_INFO "The 64-bit Kernel has started...\n");
#else
printk(KERN_INFO "The 32-bit Kernel has started...\n");
@ -134,7 +134,7 @@ void __init setup_arch(char **cmdline_p)
pdc_console_init();
#ifdef __LP64__
#ifdef CONFIG_64BIT
if(parisc_narrow_firmware) {
printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
}

View File

@ -59,58 +59,13 @@
* this. */
#define A(__x) ((unsigned long)(__x))
int do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall);
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
#include "sys32.h"
#endif
asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
{
sigset_t saveset, newset;
#ifdef __LP64__
compat_sigset_t newset32;
if (is_compat_task()) {
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (copy_from_user(&newset32, (compat_sigset_t __user *)unewset, sizeof(newset32)))
return -EFAULT;
sigset_32to64(&newset,&newset32);
} else
#endif
{
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
}
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->gr[28] = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(&saveset, regs, 1))
return -EINTR;
}
}
/*
* Do a signal return - restore sigcontext.
*/
@ -148,7 +103,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef __LP64__
#ifdef CONFIG_64BIT
compat_sigset_t compat_set;
struct compat_rt_sigframe __user * compat_frame;
@ -162,7 +117,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
(usp - sigframe_size);
DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
#ifdef __LP64__
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
@ -184,7 +139,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
spin_unlock_irq(&current->sighand->siglock);
/* Good thing we saved the old gr[30], eh? */
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (is_compat_task()) {
DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
&compat_frame->uc.uc_mcontext);
@ -296,7 +251,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
int err = 0;
#ifdef __LP64__
#ifdef CONFIG_64BIT
compat_int_t compat_val;
struct compat_rt_sigframe __user * compat_frame;
compat_sigset_t compat_set;
@ -310,7 +265,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info);
#ifdef __LP64__
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
@ -390,7 +345,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
haddr = A(ka->sa.sa_handler);
/* The sa_handler may be a pointer to a function descriptor */
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (is_compat_task()) {
#endif
if (haddr & PA_PLABEL_FDESC) {
@ -405,7 +360,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
} else {
Elf64_Fdesc fdesc;
Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
@ -425,19 +380,19 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* The syscall return path will create IAOQ values from r31.
*/
sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (is_compat_task())
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
sigframe_size |= 1;
#endif
} else {
unsigned long psw = USER_PSW;
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
psw |= PSW_W;
#endif
@ -462,7 +417,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->gr[2] = rp; /* userland return pointer */
regs->gr[26] = sig; /* signal number */
#ifdef __LP64__
#ifdef CONFIG_64BIT
if (is_compat_task()) {
regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
@ -516,6 +471,97 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
return 1;
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn =
do_no_restart_syscall;
case -ERESTARTNOHAND:
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
DBG(1,"ERESTARTSYS: putting -EINTR\n");
regs->gr[28] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* A syscall is just a branch, so all
* we have to do is fiddle the return pointer.
*/
regs->gr[31] -= 8; /* delayed branching */
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
break;
}
}
static inline void
insert_restart_trampoline(struct pt_regs *regs)
{
switch(regs->gr[28]) {
case -ERESTART_RESTARTBLOCK: {
/* Restart the system call - no handlers present */
unsigned int *usp = (unsigned int *)regs->gr[30];
/* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*
* 0: <return address (orig r31)>
* 4: <2nd half for 64-bit>
* 8: ldw 0(%sp), %r31
* 12: be 0x100(%sr2, %r0)
* 16: ldi __NR_restart_syscall, %r20
*/
#ifdef CONFIG_64BIT
put_user(regs->gr[31] >> 32, &usp[0]);
put_user(regs->gr[31] & 0xffffffff, &usp[1]);
put_user(0x0fc010df, &usp[2]);
#else
put_user(regs->gr[31], &usp[0]);
put_user(0x0fc0109f, &usp[2]);
#endif
put_user(0xe0008200, &usp[3]);
put_user(0x34140000, &usp[4]);
/* Stack is 64-byte aligned, and we only need
* to flush 1 cache line.
* Flushing one cacheline is cheap.
* "sync" on bigger (> 4 way) boxes is not.
*/
flush_icache_range(regs->gr[30], regs->gr[30] + 4);
regs->gr[31] = regs->gr[30] + 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
return;
}
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR: {
/* Hooray for delayed branching. We don't
* have to restore %r20 (the system call
* number) because it gets loaded in the delay
* slot of the branch external instruction.
*/
regs->gr[31] -= 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
return;
}
default:
break;
}
}
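
Editor's note: the two helpers above capture what do_signal() used to open-code. syscall_restart() decides, from the -ERESTART* return code and SA_RESTART, whether the interrupted call resumes or fails with -EINTR, and insert_restart_trampoline() covers the no-handler -ERESTART_RESTARTBLOCK case. A minimal user-space demonstration of the visible effect (hypothetical demo program, not part of this patch; run it on a terminal and let the alarm fire):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_alarm(int sig)
{
        (void)sig;              /* just interrupt the blocking read() */
}

int main(void)
{
        struct sigaction sa;
        char buf[16];
        ssize_t n;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;
        sa.sa_flags = 0;        /* set SA_RESTART here to see the other path */
        sigaction(SIGALRM, &sa, NULL);

        alarm(1);
        n = read(STDIN_FILENO, buf, sizeof(buf));
        if (n < 0)
                printf("read: %s\n", strerror(errno)); /* EINTR without SA_RESTART */
        else
                printf("read %zd bytes\n", n);         /* restarted read completed */
        return 0;
}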
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
@ -527,13 +573,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
* registers). As noted below, the syscall number gets restored for
* us due to the magic of delayed branching.
*/
asmlinkage int
do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall)
asmlinkage void
do_signal(struct pt_regs *regs, long in_syscall)
{
siginfo_t info;
struct k_sigaction ka;
int signr;
sigset_t *oldset;
DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
oldset, regs, regs->sr[7], in_syscall);
@ -543,7 +589,9 @@ do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall)
we would be called in that case, but for some reason we
are. */
if (!oldset)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
DBG(1,"do_signal: oldset %08lx / %08lx\n",
@ -560,98 +608,41 @@ do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall)
break;
/* Restart a system call if necessary. */
if (in_syscall) {
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
current_thread_info()->restart_block.fn = do_no_restart_syscall;
case -ERESTARTNOHAND:
DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
regs->gr[28] = -EINTR;
break;
if (in_syscall)
syscall_restart(regs, &ka);
case -ERESTARTSYS:
if (!(ka.sa.sa_flags & SA_RESTART)) {
DBG(1,"ERESTARTSYS: putting -EINTR\n");
regs->gr[28] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
/* A syscall is just a branch, so all
we have to do is fiddle the return pointer. */
regs->gr[31] -= 8; /* delayed branching */
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
break;
}
}
/* Whee! Actually deliver the signal. If the
delivery failed, we need to continue to iterate in
this loop so we can deliver the SIGSEGV... */
if (handle_signal(signr, &info, &ka, oldset, regs, in_syscall)) {
if (handle_signal(signr, &info, &ka, oldset,
regs, in_syscall)) {
DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
regs->gr[28]);
return 1;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
return;
}
}
/* end of while(1) looping forever if we can't force a signal */
/* Did we come from a system call? */
if (in_syscall) {
/* Restart the system call - no handlers present */
if (regs->gr[28] == -ERESTART_RESTARTBLOCK) {
unsigned int *usp = (unsigned int *)regs->gr[30];
/* Setup a trampoline to restart the syscall
* with __NR_restart_syscall
*
* 0: <return address (orig r31)>
* 4: <2nd half for 64-bit>
* 8: ldw 0(%sp), %r31
* 12: be 0x100(%sr2, %r0)
* 16: ldi __NR_restart_syscall, %r20
*/
#ifndef __LP64__
put_user(regs->gr[31], &usp[0]);
put_user(0x0fc0109f, &usp[2]);
#else
put_user(regs->gr[31] >> 32, &usp[0]);
put_user(regs->gr[31] & 0xffffffff, &usp[1]);
put_user(0x0fc010df, &usp[2]);
#endif
put_user(0xe0008200, &usp[3]);
put_user(0x34140000, &usp[4]);
/* Stack is 64-byte aligned, and we only need
* to flush 1 cache line.
* Flushing one cacheline is cheap.
* "sync" on bigger (> 4 way) boxes is not.
*/
asm("fdc %%r0(%%sr3, %0)\n"
"sync\n"
"fic %%r0(%%sr3, %0)\n"
"sync\n"
: : "r"(regs->gr[30]));
regs->gr[31] = regs->gr[30] + 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
} else if (regs->gr[28] == -ERESTARTNOHAND ||
regs->gr[28] == -ERESTARTSYS ||
regs->gr[28] == -ERESTARTNOINTR) {
/* Hooray for delayed branching. We don't
have to restore %r20 (the system call
number) because it gets loaded in the delay
slot of the branch external instruction. */
regs->gr[31] -= 8;
/* Preserve original r28. */
regs->gr[28] = regs->orig_r28;
}
}
if (in_syscall)
insert_restart_trampoline(regs);
DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
regs->gr[28]);
return 0;
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
return;
}
void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_RESTORE_SIGMASK))
do_signal(regs, in_syscall);
}
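
Editor's note: do_signal() now returns void and restores current->saved_sigmask via TIF_RESTORE_SIGMASK; this is the plumbing that lets pselect6()/ppoll() (wired up later in the syscall table) install a temporary signal mask with no race window. A short user-space illustration of those semantics, assuming a POSIX system (demo only, not from the patch):

#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static void on_usr1(int sig)
{
        (void)sig;
}

int main(void)
{
        sigset_t block_usr1, during_wait;
        fd_set rfds;

        signal(SIGUSR1, on_usr1);

        sigemptyset(&block_usr1);
        sigaddset(&block_usr1, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block_usr1, NULL);  /* normally blocked */

        sigemptyset(&during_wait);                  /* unblocked only inside pselect() */
        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);

        /* While pselect() sleeps, SIGUSR1 may be delivered; before the call
         * returns, the kernel restores the caller's mask -- the saved_sigmask
         * path handled by do_signal() above. */
        if (pselect(STDIN_FILENO + 1, &rfds, NULL, NULL, NULL, &during_wait) < 0)
                perror("pselect");
        return 0;
}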

View File

@ -1,6 +1,8 @@
/* Signal support for 32-bit kernel builds
*
* Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org>
*
* Code was mostly borrowed from kernel/signal.c.
* See kernel/signal.c for additional Copyrights.
*
@ -401,7 +403,7 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
int
copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
{
unsigned long tmp;
compat_uptr_t addr;
int err;
if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
@ -424,8 +426,8 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
err |= __get_user(to->si_uid, &from->si_uid);
break;
case __SI_FAULT >> 16:
err |= __get_user(tmp, &from->si_addr);
to->si_addr = (void __user *) tmp;
err |= __get_user(addr, &from->si_addr);
to->si_addr = compat_ptr(addr);
break;
case __SI_POLL >> 16:
err |= __get_user(to->si_band, &from->si_band);
@ -445,7 +447,8 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
int
copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
{
unsigned int addr;
compat_uptr_t addr;
compat_int_t val;
int err;
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
@ -474,8 +477,8 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
/* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */
err |= __put_user(from->_sifields._pad[0], &to->si_addr);
addr = ptr_to_compat(from->si_addr);
err |= __put_user(addr, &to->si_addr);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
@ -484,17 +487,36 @@ copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
addr = (unsigned long) from->si_ptr;
err |= __put_user(addr, &to->si_ptr);
val = (compat_int_t)from->si_int;
err |= __put_user(val, &to->si_int);
break;
case __SI_RT >> 16: /* Not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
addr = (unsigned long) from->si_ptr;
err |= __put_user(addr, &to->si_ptr);
val = (compat_int_t)from->si_int;
err |= __put_user(val, &to->si_int);
break;
}
}
return err;
}
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
struct compat_siginfo __user *uinfo)
{
siginfo_t info;
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
/* Not even root can pretend to send signals from the kernel.
Nor can they impersonate a kill(), which adds source info. */
if (info.si_code >= 0)
return -EPERM;
info.si_signo = sig;
/* POSIX.1b doesn't mention process groups. */
return kill_proc_info(sig, &info, pid);
}
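
Editor's note: the conversions above replace raw unsigned-long casts with compat_uptr_t plus compat_ptr()/ptr_to_compat(), so a 64-bit kernel exchanges 32-bit user pointers without silent truncation surprises. A rough user-space model of those helpers (names kept, definitions assumed for illustration):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

/* Simplified stand-ins for the kernel helpers (illustration only). */
static void *compat_ptr(compat_uptr_t uptr)
{
        return (void *)(uintptr_t)uptr;         /* zero-extend 32 -> 64 */
}

static compat_uptr_t ptr_to_compat(const void *ptr)
{
        return (compat_uptr_t)(uintptr_t)ptr;   /* truncate 64 -> 32 */
}

int main(void)
{
        int x = 42;
        const void *orig = &x;
        compat_uptr_t u = ptr_to_compat(orig);
        void *back = compat_ptr(u);

        /* The round trip is lossless only when the address fits in 32 bits,
         * which is guaranteed for pointers coming from a compat task. */
        printf("orig=%p compat=0x%08x back=%p\n", orig, (unsigned int)u, back);
        return 0;
}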

View File

@ -16,9 +16,6 @@
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@ -51,7 +48,15 @@
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#define kDEBUG 0
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...) \
if (lvl >= smp_debug_lvl) \
printk(printargs);
#else
#define smp_debug(lvl, ...)
#endif /* DEBUG_SMP */
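
Editor's note: the scattered kDEBUG conditionals give way to a single smp_debug() macro that disappears entirely unless DEBUG_SMP is defined. A self-contained sketch of the same pattern, using printf in place of printk (assumed to mirror the macro above):

#include <stdio.h>

#define DEBUG_SMP               /* comment this out and the calls vanish */

#ifdef DEBUG_SMP
static int smp_debug_lvl;       /* 0: print every message at level >= 0 */
#define smp_debug(lvl, ...)                             \
        do {                                            \
                if ((lvl) >= smp_debug_lvl)             \
                        printf(__VA_ARGS__);            \
        } while (0)
#else
#define smp_debug(lvl, ...)     do { } while (0)
#endif

int main(void)
{
        smp_debug(100, "CPU%d IPI_NOP\n", 0);
        smp_debug(100, "CPU%d is alive!\n", 0);
        return 0;
}

Wrapping the body in do { } while (0) keeps the macro usable as a single statement inside an unbraced if/else, the same reasoning behind the prefetch macro change further down.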
DEFINE_SPINLOCK(smp_lock);
@ -76,6 +81,7 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CP
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
struct smp_call_struct {
void (*func) (void *info);
@ -107,13 +113,6 @@ enum ipi_message_type {
static void
ipi_init(int cpuid)
{
/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
if(cpu_online(cpuid) )
@ -133,23 +132,12 @@ ipi_init(int cpuid)
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
cpu_data[this_cpu].state = STATE_STOPPED;
mark_bh(IPI_BH);
#else
/* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */
cpu_clear(smp_processor_id(), cpu_online_map);
local_irq_disable();
for (;;)
;
#endif
}
@ -167,10 +155,11 @@ ipi_interrupt(int irq, void *dev_id)
mb(); /* Order interrupt and bit testing. */
for (;;) {
spin_lock_irqsave(&(p->lock),flags);
spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
spin_lock_irqsave(lock, flags);
ops = p->pending_ipi;
p->pending_ipi = 0;
spin_unlock_irqrestore(&(p->lock),flags);
spin_unlock_irqrestore(lock, flags);
mb(); /* Order bit clearing and data access. */
@ -184,15 +173,11 @@ ipi_interrupt(int irq, void *dev_id)
switch (which) {
case IPI_NOP:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
#endif /* kDEBUG */
smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
break;
case IPI_RESCHEDULE:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
/*
* Reschedule callback. Everything to be
* done is done by the interrupt return path.
@ -200,9 +185,7 @@ ipi_interrupt(int irq, void *dev_id)
break;
case IPI_CALL_FUNC:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
{
volatile struct smp_call_struct *data;
void (*func)(void *info);
@ -233,28 +216,16 @@ ipi_interrupt(int irq, void *dev_id)
break;
case IPI_CPU_START:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
p->state = STATE_RUNNING;
#endif
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
break;
case IPI_CPU_STOP:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
#else
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
halt_processor();
#endif
break;
case IPI_CPU_TEST:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
break;
default:
@ -275,12 +246,13 @@ static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
struct cpuinfo_parisc *p = &cpu_data[cpu];
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
unsigned long flags;
spin_lock_irqsave(&(p->lock),flags);
spin_lock_irqsave(lock, flags);
p->pending_ipi |= 1 << op;
gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
spin_unlock_irqrestore(&(p->lock),flags);
spin_unlock_irqrestore(lock, flags);
}
@ -560,13 +532,8 @@ int __init smp_boot_one_cpu(int cpuid)
alive:
/* Remember the Slave data */
#if (kDEBUG>=100)
printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
cpuid, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
cpu_data[cpuid].state = STATE_RUNNING;
#endif
return 0;
}
@ -574,10 +541,6 @@ void __devinit smp_prepare_boot_cpu(void)
{
int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
#ifdef ENTRY_SYS_CPUS
cpu_data[0].state = STATE_RUNNING;
#endif
/* Setup BSP mappings */
printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
@ -616,101 +579,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
return cpu_online(cpu) ? 0 : -ENOSYS;
}
#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
** entry.s: ENTRY_NAME(sys_cpus) / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
int i,j=0;
extern int current_pid(int cpu);
if( argc > 2 ) {
printk("sys_cpus:Only one argument supported\n");
return (-1);
}
if ( argc == 1 ){
#ifdef DUMP_MORE_STATE
for_each_online_cpu(i) {
int cpus_per_line = 4;
if (j++ % cpus_per_line)
printk(" %3d",i);
else
printk("\n %3d",i);
}
printk("\n");
#else
printk("\n 0\n");
#endif
} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
for_each_online_cpu(i) {
if (cpu_data[i].cpuid != NO_PROC_ID) {
switch(cpu_data[i].state) {
case STATE_RENDEZVOUS:
printk("RENDEZVS ");
break;
case STATE_RUNNING:
printk((current_pid(i)!=0) ? "RUNNING " : "IDLING ");
break;
case STATE_STOPPED:
printk("STOPPED ");
break;
case STATE_HALTED:
printk("HALTED ");
break;
default:
printk("%08x?", cpu_data[i].state);
break;
}
if(cpu_online(i)) {
printk(" %4d",current_pid(i));
}
printk(" %6d",cpu_number_map(i));
printk(" %5d",i);
printk(" 0x%lx\n",cpu_data[i].hpa);
}
}
#else
printk("\n%s %4d 0 0 --------",
(current->pid)?"RUNNING ": "IDLING ",current->pid);
#endif
} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
#ifdef DUMP_MORE_STATE
printk("\nCPUSTATE CPUID\n");
for_each_online_cpu(i) {
if (cpu_data[i].cpuid != NO_PROC_ID) {
switch(cpu_data[i].state) {
case STATE_RENDEZVOUS:
printk("RENDEZVS");break;
case STATE_RUNNING:
printk((current_pid(i)!=0) ? "RUNNING " : "IDLING");
break;
case STATE_STOPPED:
printk("STOPPED ");break;
case STATE_HALTED:
printk("HALTED ");break;
default:
}
printk(" %5d\n",i);
}
}
#else
printk("\n%s CPU0",(current->pid==0)?"RUNNING ":"IDLING ");
#endif
} else {
printk("sys_cpus:Unknown request\n");
return (-1);
}
return 0;
}
#endif /* ENTRY_SYS_CPUS */
#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)

View File

@ -12,27 +12,23 @@
#include <asm/errno.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
#include <asm/assembly.h>
#include <asm/processor.h>
#include <linux/linkage.h>
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
* userspace application.
*/
#define KILL_INSN break 0,0
#ifdef CONFIG_64BIT
.level 2.0w
#else
.level 1.1
#endif
.level LEVEL
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
.export linux_gateway_page
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
@ -43,7 +39,7 @@
*/
.align ASM_PAGE_SIZE
linux_gateway_page:
ENTRY(linux_gateway_page)
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
@ -595,73 +591,43 @@ cas_action:
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
.section __ex_table,"aw"
#ifdef CONFIG_64BIT
/* Pad the address calculation */
.word 0,(2b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
#else
.word (2b - linux_gateway_page)
.word (3b - linux_gateway_page)
#endif
ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
.previous
.section __ex_table,"aw"
#ifdef CONFIG_64BIT
/* Pad the address calculation */
.word 0,(1b - linux_gateway_page)
.word 0,(3b - linux_gateway_page)
#else
.word (1b - linux_gateway_page)
.word (3b - linux_gateway_page)
#endif
.previous
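
Editor's note: both __ex_table fragments now emit native-word-sized entries through ASM_ULONG_INSN instead of duplicating .word/.dword under #ifdef CONFIG_64BIT. Each entry pairs a possibly-faulting instruction address with a fixup address the fault handler branches to. A toy C model of that lookup (illustration only; the real kernel keeps the table sorted and binary-searches it):

#include <stddef.h>
#include <stdio.h>

struct exception_table_entry {
        unsigned long insn;     /* address of the instruction that may fault */
        unsigned long fixup;    /* address to resume at if it does fault */
};

/* Hypothetical table contents; the real entries are emitted from assembly
 * into the __ex_table section, one per user-space access. */
static const struct exception_table_entry ex_table[] = {
        { 0x1000, 0x2000 },
        { 0x1010, 0x2000 },
};

static unsigned long search_exception_table(unsigned long faulting_pc)
{
        size_t i;

        for (i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
                if (ex_table[i].insn == faulting_pc)
                        return ex_table[i].fixup;
        return 0;       /* no fixup: a genuine kernel fault */
}

int main(void)
{
        printf("fixup for pc 0x1010 -> 0x%lx\n", search_exception_table(0x1010));
        return 0;
}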
end_compare_and_swap:
/* Make sure nothing else is placed on this page */
.align ASM_PAGE_SIZE
.export end_linux_gateway_page
end_linux_gateway_page:
END(linux_gateway_page)
ENTRY(end_linux_gateway_page)
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
#ifdef CONFIG_64BIT
/* FIXME: The code will always be on the gateway page
and thus it will be on the first 4k; the
assembler seems to think that the final
subtraction result is only a word in
length, so we pad the value.
*/
#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
#else
#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
#endif
#define LWS_ENTRY(_name_) ASM_ULONG_INSN (lws_##_name_ - linux_gateway_page)
.section .rodata,"a"
.align ASM_PAGE_SIZE
/* Light-weight-syscall table */
/* Start of lws table. */
.export lws_table
.Llws_table:
lws_table:
ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
END(lws_table)
/* End of lws table */
.align ASM_PAGE_SIZE
.export sys_call_table
.Lsys_call_table:
sys_call_table:
ENTRY(sys_call_table)
#include "syscall_table.S"
END(sys_call_table)
#ifdef CONFIG_64BIT
.align ASM_PAGE_SIZE
.export sys_call_table64
.Lsys_call_table64:
sys_call_table64:
ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
END(sys_call_table64)
#endif
#ifdef CONFIG_SMP
@ -671,9 +637,7 @@ sys_call_table64:
*/
.section .data
.align 4096
.export lws_lock_start
.Llws_lock_start:
lws_lock_start:
ENTRY(lws_lock_start)
/* lws locks */
.align 16
.rept 16
@ -683,6 +647,7 @@ lws_lock_start:
.word 0
.word 0
.endr
END(lws_lock_start)
.previous
#endif
/* CONFIG_SMP for lws_lock_start */

View File

@ -10,7 +10,7 @@
* Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org>
@ -282,8 +282,8 @@
* to worry about faulting trying to copy in a larger 64-bit
* struct from a 32-bit user-space app.
*/
ENTRY_SAME(rt_sigqueueinfo)
ENTRY_SAME(rt_sigsuspend_wrapper) /* not really SAME -- see the code */
ENTRY_COMP(rt_sigqueueinfo)
ENTRY_COMP(rt_sigsuspend)
ENTRY_SAME(chown) /* 180 */
/* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
ENTRY_COMP(setsockopt)
@ -377,9 +377,9 @@
ENTRY_SAME(inotify_init)
ENTRY_SAME(inotify_add_watch) /* 270 */
ENTRY_SAME(inotify_rm_watch)
ENTRY_SAME(ni_syscall) /* 271 ENTRY_COMP(pselect6) */
ENTRY_SAME(ni_syscall) /* 272 ENTRY_COMP(ppoll) */
ENTRY_SAME(migrate_pages)
ENTRY_COMP(pselect6)
ENTRY_COMP(ppoll)
ENTRY_COMP(openat) /* 275 */
ENTRY_SAME(mkdirat)
ENTRY_SAME(mknodat)
@ -399,5 +399,11 @@
ENTRY_SAME(splice)
ENTRY_OURS(sync_file_range)
ENTRY_SAME(tee)
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
ENTRY_SAME(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
/* Nothing yet */

View File

@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@ -98,7 +99,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
* cycles after the IT fires. But it's arbitrary how much time passes
* before we call it "late". I've picked one second.
*/
if (ticks_elapsed > HZ) {
if (unlikely(ticks_elapsed > HZ)) {
/* Scenario 3: very long delay? bad in any case */
printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
" cycles %lX rem %lX "
@ -147,10 +148,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
write_sequnlock(&xtime_lock);
}
/* check soft power switch status */
if (cpu == 0 && !atomic_read(&power_tasklet.count))
tasklet_schedule(&power_tasklet);
return IRQ_HANDLED;
}
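
Editor's note: the ticks_elapsed test gains an unlikely() hint so the compiler keeps the common path straight-line and moves the "delayed" warning out of it. These hints are thin wrappers around GCC's __builtin_expect(); a minimal sketch of the usual definitions (shown for illustration, requires GCC or a compatible compiler):

#include <stdio.h>

#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

int main(void)
{
        int ticks_elapsed = 1, hz = 250;

        if (unlikely(ticks_elapsed > hz))
                printf("timer interrupt delayed!\n");   /* cold path */
        else
                printf("on time\n");                    /* expected path */
        return 0;
}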
@ -172,121 +169,41 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
/*
* Return the number of micro-seconds that elapsed since the last
* update to wall time (aka xtime). The xtime_lock
* must be at least read-locked when calling this routine.
*/
static inline unsigned long gettimeoffset (void)
/* clock source code */
static cycle_t read_cr16(void)
{
#ifndef CONFIG_SMP
/*
* FIXME: This won't work on smp because jiffies are updated by cpu 0.
* Once parisc-linux learns the cr16 difference between processors,
* this could be made to work.
*/
unsigned long now;
unsigned long prev_tick;
unsigned long next_tick;
unsigned long elapsed_cycles;
unsigned long usec;
unsigned long cpuid = smp_processor_id();
unsigned long cpt = clocktick;
next_tick = cpu_data[cpuid].it_value;
now = mfctl(16); /* Read the hardware interval timer. */
prev_tick = next_tick - cpt;
/* Assume Scenario 1: "now" is later than prev_tick. */
elapsed_cycles = now - prev_tick;
/* approximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
#if HZ == 1000
if (elapsed_cycles > (cpt << 10) )
#elif HZ == 250
if (elapsed_cycles > (cpt << 8) )
#elif HZ == 100
if (elapsed_cycles > (cpt << 7) )
#else
#warn WTF is HZ set to anyway?
if (elapsed_cycles > (HZ * cpt) )
#endif
{
/* Scenario 3: clock ticks are missing. */
printk (KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
" cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
cpuid, elapsed_cycles / cpt,
elapsed_cycles, prev_tick, now, next_tick, cpt);
}
/* FIXME: Can we improve the precision? Not with PAGE0. */
usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
return usec;
#else
return 0;
#endif
return get_cycles();
}
void
do_gettimeofday (struct timeval *tv)
static int cr16_update_callback(void);
static struct clocksource clocksource_cr16 = {
.name = "cr16",
.rating = 300,
.read = read_cr16,
.mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
.mult = 0, /* to be set */
.shift = 22,
.update_callback = cr16_update_callback,
.is_continuous = 1,
};
static int cr16_update_callback(void)
{
unsigned long flags, seq, usec, sec;
int change = 0;
/* Hold xtime_lock and adjust timeval. */
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
usec = gettimeoffset();
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
/* Move adjusted usec's into sec's. */
while (usec >= USEC_PER_SEC) {
usec -= USEC_PER_SEC;
++sec;
/* since the cr16 cycle counters are not synchronized across CPUs,
we'll check if we should switch to a safe clocksource: */
if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
clocksource_cr16.rating = 0;
clocksource_reselect();
change = 1;
}
/* Return adjusted result. */
tv->tv_sec = sec;
tv->tv_usec = usec;
return change;
}
EXPORT_SYMBOL(do_gettimeofday);
int
do_settimeofday (struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
{
/*
* This is revolting. We need to set "xtime"
* correctly. However, the value in this location is
* the value at the most recent update of wall time.
* Discover what correction gettimeofday would have
* done, and then undo it!
*/
nsec -= gettimeoffset() * 1000;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
EXPORT_SYMBOL(do_settimeofday);
void __init start_cpu_itimer(void)
{
@ -301,11 +218,18 @@ void __init start_cpu_itimer(void)
void __init time_init(void)
{
static struct pdc_tod tod_data;
unsigned long current_cr16_khz;
clocktick = (100 * PAGE0->mem_10msec) / HZ;
start_cpu_itimer(); /* get CPU 0 started */
/* register at clocksource framework */
current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
clocksource_cr16.mult = clocksource_khz2mult(current_cr16_khz,
clocksource_cr16.shift);
clocksource_register(&clocksource_cr16);
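
Editor's note: with cr16 registered as a clocksource, the generic time code converts cycle deltas to nanoseconds as (cycles * mult) >> shift, and clocksource_khz2mult() picks mult so that mult / 2^shift is the nanosecond length of one cycle. A worked example for a hypothetical 250 MHz cr16 (the helper's formula below is an assumption for illustration, not copied from kernel source):

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of clocksource_khz2mult(): choose mult so that
 * mult / 2^shift equals the length of one cycle in nanoseconds. */
static uint32_t khz2mult(uint32_t khz, uint32_t shift)
{
        uint64_t tmp = (uint64_t)1000000 << shift;      /* ns per ms, scaled */

        tmp += khz / 2;                                 /* round to nearest */
        return (uint32_t)(tmp / khz);
}

int main(void)
{
        uint32_t khz = 250000;          /* hypothetical 250 MHz cr16 */
        uint32_t shift = 22;            /* matches clocksource_cr16 above */
        uint32_t mult = khz2mult(khz, shift);
        uint64_t cycles = 250000000;    /* one second's worth of ticks */
        uint64_t ns = (cycles * mult) >> shift;

        printf("mult=%u -> %llu ns (expect ~1000000000)\n",
               (unsigned)mult, (unsigned long long)ns);
        return 0;
}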
if (pdc_tod_read(&tod_data) == 0) {
unsigned long flags;

View File

@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/kallsyms.h>
#include <linux/bug.h>
#include <asm/assembly.h>
#include <asm/system.h>
@ -39,6 +40,8 @@
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
@ -49,7 +52,7 @@
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
int printbinary(char *buf, unsigned long x, int nbits)
static int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
while (mask != 0) {
@ -61,7 +64,7 @@ int printbinary(char *buf, unsigned long x, int nbits)
return nbits;
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
@ -160,13 +163,13 @@ static void do_show_stack(struct unwind_frame_info *info)
{
int i = 1;
printk("Backtrace:\n");
printk(KERN_CRIT "Backtrace:\n");
while (i <= 16) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
printk(" [<" RFMT ">] ", info->ip);
printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
#ifdef CONFIG_KALLSYMS
print_symbol("%s\n", info->ip);
#else
@ -185,18 +188,19 @@ void show_stack(struct task_struct *task, unsigned long *s)
if (!task) {
unsigned long sp;
struct pt_regs *r;
HERE:
asm volatile ("copy %%r30, %0" : "=r"(sp));
r = kzalloc(sizeof(struct pt_regs), GFP_KERNEL);
if (!r)
return;
r->iaoq[0] = (unsigned long)&&HERE;
r->gr[2] = (unsigned long)__builtin_return_address(0);
r->gr[30] = sp;
unwind_frame_init(&info, current, r);
kfree(r);
{
struct pt_regs r;
memset(&r, 0, sizeof(struct pt_regs));
r.iaoq[0] = (unsigned long)&&HERE;
r.gr[2] = (unsigned long)__builtin_return_address(0);
r.gr[30] = sp;
unwind_frame_init(&info, current, &r);
}
} else {
unwind_frame_init_from_blocked_task(&info, task);
}
@ -204,6 +208,11 @@ HERE:
do_show_stack(&info);
}
int is_valid_bugaddr(unsigned long iaoq)
{
return 1;
}
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs)) {
@ -222,15 +231,15 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
oops_in_progress = 1;
/* Amuse the user in a SPARC fashion */
printk(
" _______________________________ \n"
" < Your System ate a SPARC! Gah! >\n"
" ------------------------------- \n"
" \\ ^__^\n"
" \\ (xx)\\_______\n"
" (__)\\ )\\/\\\n"
" U ||----w |\n"
" || ||\n");
if (err) printk(
KERN_CRIT " _______________________________ \n"
KERN_CRIT " < Your System ate a SPARC! Gah! >\n"
KERN_CRIT " ------------------------------- \n"
KERN_CRIT " \\ ^__^\n"
KERN_CRIT " \\ (xx)\\_______\n"
KERN_CRIT " (__)\\ )\\/\\\n"
KERN_CRIT " U ||----w |\n"
KERN_CRIT " || ||\n");
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
@ -242,9 +251,20 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
if (!console_drivers)
pdc_console_restart();
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, current->pid, str, err);
if (err)
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, current->pid, str, err);
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
local_irq_enable();
while (1);
}
current->thread.flags |= PARISC_KERNEL_DEATH;
show_regs(regs);
dump_stack();
if (in_interrupt())
panic("Fatal exception in interrupt");
@ -255,14 +275,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
}
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
local_irq_enable();
while (1);
}
current->thread.flags |= PARISC_KERNEL_DEATH;
do_exit(SIGSEGV);
}
@ -273,61 +285,45 @@ int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
void handle_gdb_break(struct pt_regs *regs, int wot)
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
struct siginfo si;
si.si_code = wot;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
si.si_errno = 0;
si.si_code = wot;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
force_sig_info(SIGTRAP, &si, current);
}
void handle_break(unsigned iir, struct pt_regs *regs)
static void handle_break(struct pt_regs *regs)
{
struct siginfo si;
unsigned iir = regs->iir;
switch(iir) {
case 0x00:
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
current->pid, current->comm);
#endif
die_if_kernel("Breakpoint", regs, 0);
#ifdef PRINT_USER_FAULTS
show_regs(regs);
#endif
si.si_code = TRAP_BRKPT;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
si.si_signo = SIGTRAP;
force_sig_info(SIGTRAP, &si, current);
break;
case GDB_BREAK_INSN:
die_if_kernel("Breakpoint", regs, 0);
handle_gdb_break(regs, TRAP_BRKPT);
break;
default:
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
iir, current->pid, current->comm);
show_regs(regs);
#endif
si.si_signo = SIGTRAP;
si.si_code = TRAP_BRKPT;
si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
force_sig_info(SIGTRAP, &si, current);
return;
if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
/* check if a BUG() or WARN() trapped here. */
enum bug_trap_type tt;
tt = report_bug(regs->iaoq[0] & ~3);
if (tt == BUG_TRAP_TYPE_WARN) {
regs->iaoq[0] += 4;
regs->iaoq[1] += 4;
return; /* return to next instruction when WARN_ON(). */
}
die_if_kernel("Unknown kernel breakpoint", regs,
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
}
#ifdef PRINT_USER_FAULTS
if (unlikely(iir != GDB_BREAK_INSN)) {
printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
iir & 31, (iir>>13) & ((1<<13)-1),
current->pid, current->comm);
show_regs(regs);
}
#endif
int handle_toc(void)
{
printk(KERN_CRIT "TOC call.\n");
return 0;
/* send standard GDB signal */
handle_gdb_break(regs, TRAP_BRKPT);
}
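
Editor's note: handle_break() now recognises the BUG()/WARN() break instruction and lets report_bug() classify it: a WARN is logged and execution resumes at the next instruction (both IAOQ words advance by 4), anything else is treated as fatal. A schematic model of that decision (hypothetical stand-alone code, with report_bug() stubbed out):

#include <stdio.h>

enum bug_trap_type { BUG_TRAP_TYPE_NONE, BUG_TRAP_TYPE_WARN, BUG_TRAP_TYPE_BUG };

struct fake_regs {
        unsigned long iaoq[2];  /* instruction address offset queue */
};

/* Stand-in for report_bug(): pretend every trap we see is a WARN. */
static enum bug_trap_type report_bug_stub(unsigned long addr)
{
        (void)addr;
        return BUG_TRAP_TYPE_WARN;
}

static void handle_bug_break(struct fake_regs *regs)
{
        switch (report_bug_stub(regs->iaoq[0] & ~3UL)) {
        case BUG_TRAP_TYPE_WARN:
                regs->iaoq[0] += 4;     /* step over the trapping instruction */
                regs->iaoq[1] += 4;
                printf("WARN: continuing at 0x%lx\n", regs->iaoq[0]);
                break;
        default:
                printf("BUG: would call die_if_kernel()\n");
                break;
        }
}

int main(void)
{
        struct fake_regs regs = { { 0x1000, 0x1004 } };

        handle_bug_break(&regs);
        return 0;
}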
static void default_trap(int code, struct pt_regs *regs)
@ -336,7 +332,7 @@ static void default_trap(int code, struct pt_regs *regs)
show_regs(regs);
}
void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;
void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
void transfer_pim_to_trap_frame(struct pt_regs *regs)
@ -554,7 +550,8 @@ void handle_interruption(int code, struct pt_regs *regs)
/* Low-priority machine check */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
flush_all_caches();
flush_cache_all();
flush_tlb_all();
cpu_lpmc(5, regs);
return;
@ -572,7 +569,7 @@ void handle_interruption(int code, struct pt_regs *regs)
case 9:
/* Break instruction trap */
handle_break(regs->iir,regs);
handle_break(regs);
return;
case 10:
@ -840,7 +837,7 @@ int __init check_ivt(void *iva)
return 0;
}
#ifndef __LP64__
#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;
@ -852,7 +849,7 @@ void __init trap_init(void)
if (boot_cpu_data.cpu_type >= pcxu)
iva = (void *) &fault_vector_20;
else
#ifdef __LP64__
#ifdef CONFIG_64BIT
panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
iva = (void *) &fault_vector_11;

View File

@ -20,8 +20,11 @@
*
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
/* #define DEBUG_UNALIGNED 1 */
@ -32,7 +35,7 @@
#define DPRINTF(fmt, args...)
#endif
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
@ -147,15 +150,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg)
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r20", FIXUP_BRANCH_CLOBBER );
@ -192,15 +188,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b, 4b)
ASM_EXCEPTIONTABLE_ENTRY(2b, 4b)
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20", FIXUP_BRANCH_CLOBBER );
@ -224,7 +213,7 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
regs->isr, regs->ior, toreg);
#ifdef CONFIG_PA20
#ifndef __LP64__
#ifndef CONFIG_64BIT
if (!flop)
return -1;
#endif
@ -243,15 +232,8 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
"4: ldi -2, %1\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
: "=r" (val), "=r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20", FIXUP_BRANCH_CLOBBER );
@ -275,17 +257,9 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
"5: ldi -2, %2\n"
FIXUP_BRANCH(4b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,5b\n"
" .dword 2b,5b\n"
" .dword 3b,5b\n"
#else
" .word 1b,5b\n"
" .word 2b,5b\n"
" .word 3b,5b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,5b)
ASM_EXCEPTIONTABLE_ENTRY(2b,5b)
ASM_EXCEPTIONTABLE_ENTRY(3b,5b)
: "=r" (valh), "=r" (vall), "=r" (ret)
: "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
: "r19", "r20", FIXUP_BRANCH_CLOBBER );
@ -325,15 +299,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg)
"4: ldi -2, %0\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", FIXUP_BRANCH_CLOBBER );
@ -379,15 +346,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
"4: ldi -2, %0\n"
FIXUP_BRANCH(3b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,4b\n"
" .dword 2b,4b\n"
#else
" .word 1b,4b\n"
" .word 2b,4b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,4b)
ASM_EXCEPTIONTABLE_ENTRY(2b,4b)
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
@ -410,7 +370,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
val, regs->isr, regs->ior);
#ifdef CONFIG_PA20
#ifndef __LP64__
#ifndef CONFIG_64BIT
if (!flop)
return -1;
#endif
@ -436,19 +396,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"6: ldi -2, %0\n"
FIXUP_BRANCH(5b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,6b\n"
" .dword 2b,6b\n"
" .dword 3b,6b\n"
" .dword 4b,6b\n"
#else
" .word 1b,6b\n"
" .word 2b,6b\n"
" .word 3b,6b\n"
" .word 4b,6b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,6b)
ASM_EXCEPTIONTABLE_ENTRY(2b,6b)
ASM_EXCEPTIONTABLE_ENTRY(3b,6b)
ASM_EXCEPTIONTABLE_ENTRY(4b,6b)
: "=r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
@ -479,21 +430,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
"7: ldi -2, %0\n"
FIXUP_BRANCH(6b)
" .previous\n"
" .section __ex_table,\"aw\"\n"
#ifdef __LP64__
" .dword 1b,7b\n"
" .dword 2b,7b\n"
" .dword 3b,7b\n"
" .dword 4b,7b\n"
" .dword 5b,7b\n"
#else
" .word 1b,7b\n"
" .word 2b,7b\n"
" .word 3b,7b\n"
" .word 4b,7b\n"
" .word 5b,7b\n"
#endif
" .previous\n"
ASM_EXCEPTIONTABLE_ENTRY(1b,7b)
ASM_EXCEPTIONTABLE_ENTRY(2b,7b)
ASM_EXCEPTIONTABLE_ENTRY(3b,7b)
ASM_EXCEPTIONTABLE_ENTRY(4b,7b)
ASM_EXCEPTIONTABLE_ENTRY(5b,7b)
: "=r" (ret)
: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1", FIXUP_BRANCH_CLOBBER );

View File

@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>

View File

@ -68,6 +68,8 @@ SECTIONS
RODATA
BUG_TABLE
/* writeable */
. = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so
that we can properly leave these

View File

@ -17,7 +17,7 @@ raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
};
#endif
#ifdef __LP64__
#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
unsigned long temp, flags;
@ -56,7 +56,7 @@ unsigned long __xchg8(char x, char *ptr)
}
#ifdef __LP64__
#ifdef CONFIG_64BIT
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
unsigned long flags;

View File

@ -22,6 +22,7 @@
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
@ -30,7 +31,7 @@
/* t2 = smp_processor_id() */
mfctl 30,\t2
ldw TI_CPU(\t2),\t2
#ifdef __LP64__
#ifdef CONFIG_64BIT
extrd,u \t2,63,32,\t2
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
@ -58,33 +59,34 @@
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
.export fixup_get_user_skip_1
fixup_get_user_skip_1:
ENTRY(fixup_get_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
ENDPROC(fixup_get_user_skip_1)
.export fixup_get_user_skip_2
fixup_get_user_skip_2:
ENTRY(fixup_get_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
ENDPROC(fixup_get_user_skip_2)
/* put_user() fixups, store -EFAULT in r8 */
.export fixup_put_user_skip_1
fixup_put_user_skip_1:
ENTRY(fixup_put_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
ENDPROC(fixup_put_user_skip_1)
.export fixup_put_user_skip_2
fixup_put_user_skip_2:
ENTRY(fixup_put_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
ENDPROC(fixup_put_user_skip_2)

View File

@ -37,6 +37,7 @@
#include <asm/assembly.h>
#include <asm/errno.h>
#include <linux/linkage.h>
/*
* get_sr gets the appropriate space value into
@ -67,8 +68,7 @@
* otherwise strlen (i.e. excludes zero byte)
*/
.export lstrncpy_from_user,code
lstrncpy_from_user:
ENTRY(lstrncpy_from_user)
.proc
.callinfo NO_CALLS
.entry
@ -87,6 +87,7 @@ $lsfu_exit:
bv %r0(%r2)
nop
.exit
ENDPROC(lstrncpy_from_user)
.section .fixup,"ax"
3: fixup_branch $lsfu_exit
@ -94,13 +95,8 @@ $lsfu_exit:
.previous
.section __ex_table,"aw"
#ifdef __LP64__
.dword 1b,3b
.dword 2b,3b
#else
.word 1b,3b
.word 2b,3b
#endif
ASM_ULONG_INSN 1b,3b
ASM_ULONG_INSN 2b,3b
.previous
.procend
@ -112,8 +108,7 @@ $lsfu_exit:
* otherwise, returns number of bytes not transferred.
*/
.export lclear_user,code
lclear_user:
ENTRY(lclear_user)
.proc
.callinfo NO_CALLS
.entry
@ -127,6 +122,7 @@ $lclu_done:
bv %r0(%r2)
copy %r25,%r28
.exit
ENDPROC(lclear_user)
.section .fixup,"ax"
2: fixup_branch $lclu_done
@ -134,11 +130,7 @@ $lclu_done:
.previous
.section __ex_table,"aw"
#ifdef __LP64__
.dword 1b,2b
#else
.word 1b,2b
#endif
ASM_ULONG_INSN 1b,2b
.previous
.procend
@ -151,8 +143,7 @@ $lclu_done:
* else strlen + 1 (i.e. includes zero byte).
*/
.export lstrnlen_user,code
lstrnlen_user:
ENTRY(lstrnlen_user)
.proc
.callinfo NO_CALLS
.entry
@ -172,6 +163,7 @@ $lslen_done:
$lslen_nzero:
b $lslen_done
ldo 1(%r26),%r26 /* special case for N == 0 */
ENDPROC(lstrnlen_user)
.section .fixup,"ax"
3: fixup_branch $lslen_done
@ -179,13 +171,8 @@ $lslen_nzero:
.previous
.section __ex_table,"aw"
#ifdef __LP64__
.dword 1b,3b
.dword 2b,3b
#else
.word 1b,3b
.word 2b,3b
#endif
ASM_ULONG_INSN 1b,3b
ASM_ULONG_INSN 2b,3b
.previous
.procend

View File

@ -96,30 +96,18 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
#define DPRINTF(fmt, args...)
#endif
#ifndef __LP64__
#define EXC_WORD ".word"
#else
#define EXC_WORD ".dword"
#endif
#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t" EXC_WORD "\t1b\n" \
"\t" EXC_WORD "\t" #_e "\n" \
"\t.previous\n" \
"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: _tt(_t), "+r"(_a) \
: \
: "r8")
#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t" EXC_WORD "\t1b\n" \
"\t" EXC_WORD "\t" #_e "\n" \
"\t.previous\n" \
"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: "+r"(_a) \
: _tt(_t) \
: "r8")
@ -133,22 +121,16 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn " " #_o "(" _s ",%1), %0\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t" EXC_WORD "\t1b\n" \
"\t" EXC_WORD "\t" #_e "\n" \
"\t.previous\n" \
"1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: _tt(_t) \
: "r"(_a) \
: "r8")
#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
__asm__ __volatile__ ( \
"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t" EXC_WORD "\t1b\n" \
"\t" EXC_WORD "\t" #_e "\n" \
"\t.previous\n" \
"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: \
: _tt(_t), "r"(_a) \
: "r8")
@ -167,8 +149,8 @@ extern inline void prefetch_dst(const void *addr)
__asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
}
#else
#define prefetch_src(addr)
#define prefetch_dst(addr)
#define prefetch_src(addr) do { } while(0)
#define prefetch_dst(addr) do { } while(0)
#endif
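
Editor's note: prefetch_src()/prefetch_dst() now expand to do { } while(0) instead of to nothing, so a disabled prefetch still behaves as a single statement at every call site. A small illustration of the difference (hypothetical example):

#include <stdio.h>

#define prefetch_none(addr)                        /* old style: expands to nothing */
#define prefetch_stmt(addr)   do { } while (0)     /* new style: one real statement */

int main(void)
{
        int have_addr = 1;
        char buf[8];

        if (have_addr)
                prefetch_stmt(buf);         /* reads like any other call */
        else
                printf("nothing to prefetch\n");

        /* With prefetch_none() the `if` above would be left with an empty
         * body (`if (have_addr) ;`), which is legal C but draws warnings
         * such as -Wempty-body and no longer looks like a call once the
         * preprocessor has run. */
        (void)buf;
        return 0;
}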
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words

View File

@ -24,10 +24,6 @@
/* dumped to the console via printk) */
/* Defines for parisc_acctyp() */
#define READ 0
#define WRITE 1
/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)

View File

@ -6,7 +6,7 @@
* changed by Philipp Rumpf
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2004 Randolph Chung (tausq@debian.org)
* Copyright 2006 Helge Deller (deller@gmx.de)
* Copyright 2006-2007 Helge Deller (deller@gmx.de)
*
*/
@ -24,6 +24,7 @@
#include <linux/pagemap.h> /* for release_pages and page_cache_release */
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
@ -65,11 +66,11 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
#else /* !__LP64__ */
#else /* !CONFIG_64BIT */
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !__LP64__ */
#endif /* !CONFIG_64BIT */
static unsigned long mem_limit __read_mostly = MAX_MEM;
@ -452,6 +453,8 @@ unsigned long pcxl_dma_start __read_mostly;
void __init mem_init(void)
{
int codesize, reservedpages, datasize, initsize;
high_memory = __va((max_pfn << PAGE_SHIFT));
#ifndef CONFIG_DISCONTIGMEM
@ -466,7 +469,32 @@ void __init mem_init(void)
}
#endif
printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
codesize = (unsigned long)_etext - (unsigned long)_text;
datasize = (unsigned long)_edata - (unsigned long)_etext;
initsize = (unsigned long)__init_end - (unsigned long)__init_begin;
reservedpages = 0;
{
unsigned long pfn;
#ifdef CONFIG_DISCONTIGMEM
int i;
for (i = 0; i < npmem_ranges; i++) {
for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) {
if (PageReserved(pfn_to_page(pfn)))
reservedpages++;
}
}
#else /* !CONFIG_DISCONTIGMEM */
for (pfn = 0; pfn < max_pfn; pfn++) {
/*
* Only count reserved RAM pages
*/
if (PageReserved(pfn_to_page(pfn)))
reservedpages++;
}
#endif
}
#ifdef CONFIG_PA11
if (hppa_dma_ops == &pcxl_dma_ops) {
@ -480,6 +508,38 @@ void __init mem_init(void)
vmalloc_start = SET_MAP_OFFSET(MAP_START);
#endif
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10
);
#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
printk("virtual kernel memory layout:\n"
" vmalloc : 0x%p - 0x%p (%4ld MB)\n"
" memory : 0x%p - 0x%p (%4ld MB)\n"
" .init : 0x%p - 0x%p (%4ld kB)\n"
" .data : 0x%p - 0x%p (%4ld kB)\n"
" .text : 0x%p - 0x%p (%4ld kB)\n",
(void*)VMALLOC_START, (void*)VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
__va(0), high_memory,
((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
__init_begin, __init_end,
((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
_etext, _edata,
((unsigned long)_edata - (unsigned long)_etext) >> 10,
_text, _etext,
((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}
unsigned long *empty_zero_page __read_mostly;
@ -547,7 +607,7 @@ void show_mem(void)
printk("Zone list for zone %d on node %d: ", j, i);
for (k = 0; zl->zones[k] != NULL; k++)
printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
printk("[%ld/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
printk("\n");
}
}

View File

@ -26,7 +26,7 @@
*/
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
void *addr;
void __iomem *addr;
struct vm_struct *area;
unsigned long offset, last_addr;
pgprot_t pgprot;
@ -80,14 +80,14 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
if (!area)
return NULL;
addr = area->addr;
addr = (void __iomem *) area->addr;
if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
phys_addr, pgprot)) {
vfree(addr);
return NULL;
}
return (void __iomem *) (offset + (char *)addr);
return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
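
Editor's note: __ioremap() now carries the cookie as void __iomem * throughout instead of casting only at the end, which lets sparse catch accidental dereferences or mixing with plain pointers. For ordinary builds the annotation is empty; the sketch below assumes the usual compiler-header wiring (assumption, not copied from the kernel headers):

/* Assumed wiring of the sparse annotation; ordinary compilers see an
 * empty macro, so this builds and runs as plain C. */
#ifdef __CHECKER__
# define __iomem        __attribute__((noderef, address_space(2)))
#else
# define __iomem
#endif

#include <stdio.h>

static unsigned char fake_mmio[16];     /* stand-in for a device register block */

static void __iomem *fake_ioremap(void)
{
        return (void __iomem *)fake_mmio;
}

static unsigned char fake_readb(const void __iomem *addr)
{
        /* Only accessor helpers like this are meant to strip the annotation. */
        return *(const volatile unsigned char *)addr;
}

int main(void)
{
        void __iomem *regs = fake_ioremap();

        fake_mmio[3] = 0x5a;
        printf("reg3 = 0x%02x\n", fake_readb((const char __iomem *)regs + 3));
        return 0;
}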

View File

@ -1,166 +0,0 @@
/*
* kmap/page table map and unmap support routines
*
* Copyright 1999,2000 Hewlett-Packard Company
* Copyright 2000 John Marvin <jsm at hp.com>
* Copyright 2000 Grant Grundler <grundler at parisc-linux.org>
* Copyright 2000 Philipp Rumpf <prumpf@tux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
** Stolen mostly from arch/parisc/kernel/pci-dma.c
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/page.h> /* get_order */
#undef flush_cache_all
#define flush_cache_all flush_all_caches
typedef void (*pte_iterator_t) (pte_t * pte, unsigned long arg);
#if 0
/* XXX This routine could be used with iterate_page() to replace
* unmap_uncached_page() and save a little code space but I didn't
* do that since I'm not certain whether this is the right path. -PB
*/
static void unmap_cached_pte(pte_t * pte, unsigned long addr, unsigned long arg)
{
pte_t page = *pte;
pte_clear(&init_mm, addr, pte);
if (!pte_none(page)) {
if (pte_present(page)) {
unsigned long map_nr = pte_pagenr(page);
if (map_nr < max_mapnr)
__free_page(mem_map + map_nr);
} else {
printk(KERN_CRIT
"Whee.. Swapped out page in kernel page table\n");
}
}
}
#endif
/* These two routines should probably check a few things... */
static void set_uncached(pte_t * pte, unsigned long arg)
{
pte_val(*pte) |= _PAGE_NO_CACHE;
}
static void set_cached(pte_t * pte, unsigned long arg)
{
pte_val(*pte) &= ~_PAGE_NO_CACHE;
}
static inline void iterate_pte(pmd_t * pmd, unsigned long address,
unsigned long size, pte_iterator_t op,
unsigned long arg)
{
pte_t *pte;
unsigned long end;
if (pmd_none(*pmd))
return;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
return;
}
pte = pte_offset(pmd, address);
address &= ~PMD_MASK;
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
op(pte, arg);
address += PAGE_SIZE;
pte++;
} while (address < end);
}
static inline void iterate_pmd(pgd_t * dir, unsigned long address,
unsigned long size, pte_iterator_t op,
unsigned long arg)
{
pmd_t *pmd;
unsigned long end;
if (pgd_none(*dir))
return;
if (pgd_bad(*dir)) {
pgd_ERROR(*dir);
pgd_clear(dir);
return;
}
pmd = pmd_offset(dir, address);
address &= ~PGDIR_MASK;
end = address + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
iterate_pte(pmd, address, end - address, op, arg);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address < end);
}
static void iterate_pages(unsigned long address, unsigned long size,
pte_iterator_t op, unsigned long arg)
{
pgd_t *dir;
unsigned long end = address + size;
dir = pgd_offset_k(address);
flush_cache_all();
do {
iterate_pmd(dir, address, end - address, op, arg);
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
flush_tlb_all();
}
void
kernel_set_cachemode(unsigned long vaddr, unsigned long size, int what)
{
switch (what) {
case IOMAP_FULL_CACHING:
iterate_pages(vaddr, size, set_cached, 0);
flush_tlb_range(NULL, vaddr, size);
break;
case IOMAP_NOCACHE_SER:
iterate_pages(vaddr, size, set_uncached, 0);
flush_tlb_range(NULL, vaddr, size);
break;
default:
printk(KERN_CRIT
"kernel_set_cachemode mode %d not understood\n",
what);
break;
}
}

View File

@ -18,6 +18,6 @@ int __init oprofile_arch_init(struct oprofile_operations * ops)
}
void oprofile_arch_exit()
void oprofile_arch_exit(void)
{
}

View File

@ -236,7 +236,7 @@ static int __init
agp_ioc_init(void __iomem *ioc_regs)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u64 *iova_base, *io_pdir, io_tlb_ps;
u64 iova_base, *io_pdir, io_tlb_ps;
int io_tlb_shift;
printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");

View File

@ -430,10 +430,10 @@ config HPLANCE
config LASI_82596
tristate "Lasi ethernet"
depends on NET_ETHERNET && PARISC && GSC_LASI
depends on NET_ETHERNET && GSC
help
Say Y here to support the on-board Intel 82596 ethernet controller
built into Hewlett-Packard PA-RISC machines.
Say Y here to support the built-in Intel 82596 ethernet controller
found in Hewlett-Packard PA-RISC machines with 10 Mbit ethernet.
config MIPS_JAZZ_SONIC
tristate "MIPS JAZZ onboard SONIC Ethernet support"

View File

@ -384,7 +384,7 @@ struct i596_private {
struct device *dev;
};
static char init_setup[] =
static const char init_setup[] =
{
0x8E, /* length, prefetch on */
0xC8, /* fifo to 8, monitor off */
@ -683,7 +683,7 @@ static int init_i596_mem(struct net_device *dev)
enable_irq(dev->irq); /* enable IRQs from LAN */
DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
memcpy(lp->cf_cmd.i596_config, init_setup, 14);
memcpy(lp->cf_cmd.i596_config, init_setup, sizeof(init_setup));
lp->cf_cmd.cmd.command = CmdConfigure;
CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
i596_add_cmd(dev, &lp->cf_cmd.cmd);
@ -1156,32 +1156,12 @@ static int __devinit i82596_probe(struct net_device *dev,
dma_addr_t dma_addr;
/* This lot is to ensure things have been cache line aligned. */
if (sizeof(struct i596_rfd) != 32) {
printk("82596: sizeof(struct i596_rfd) = %d\n",
(int)sizeof(struct i596_rfd));
return -ENODEV;
}
if ((sizeof(struct i596_rbd) % 32) != 0) {
printk("82596: sizeof(struct i596_rbd) = %d\n",
(int)sizeof(struct i596_rbd));
return -ENODEV;
}
if ((sizeof(struct tx_cmd) % 32) != 0) {
printk("82596: sizeof(struct tx_cmd) = %d\n",
(int)sizeof(struct tx_cmd));
return -ENODEV;
}
if (sizeof(struct i596_tbd) != 32) {
printk("82596: sizeof(struct i596_tbd) = %d\n",
(int)sizeof(struct i596_tbd));
return -ENODEV;
}
BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
#ifndef __LP64__
if (sizeof(struct i596_private) > 4096) {
printk("82596: sizeof(struct i596_private) = %d\n",
(int)sizeof(struct i596_private));
return -ENODEV;
}
BUILD_BUG_ON(sizeof(struct i596_private) > 4096);
#endif
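
Editor's note: the runtime sizeof() checks become BUILD_BUG_ON() assertions, so a mis-sized descriptor now stops the build instead of failing probe at boot. One classic way such a compile-time assertion can be written (sketch; the kernel's own definition may differ):

#include <stdio.h>

/* Classic compile-time assertion: a true condition produces a negative
 * array size and the build fails on the offending line. */
#define BUILD_BUG_ON(condition) \
        ((void)sizeof(char[1 - 2 * !!(condition)]))

struct i596_rfd_model {         /* hypothetical 32-byte descriptor */
        unsigned int fields[8];
};

int main(void)
{
        BUILD_BUG_ON(sizeof(struct i596_rfd_model) != 32);  /* passes */
        /* BUILD_BUG_ON(sizeof(struct i596_rfd_model) != 64);  would not compile */
        printf("descriptor layout verified at compile time\n");
        return 0;
}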
if (!dev->base_addr || !dev->irq)

View File

@ -10,10 +10,6 @@
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This Driver currently only supports the console (port 0) on the MUX.
** Additional work will be needed on this driver to enable the full
** functionality of the MUX.
**
*/
#include <linux/types.h>
@ -67,7 +63,7 @@ static int hppb_probe(struct parisc_device *dev)
}
card = card->next;
}
printk(KERN_INFO "Found GeckoBoa at 0x%lx\n", dev->hpa.start);
printk(KERN_INFO "Found GeckoBoa at 0x%x\n", dev->hpa.start);
card->hpa = dev->hpa.start;
card->mmio_region.name = "HP-PB Bus";
@ -78,16 +74,18 @@ static int hppb_probe(struct parisc_device *dev)
status = ccio_request_resource(dev, &card->mmio_region);
if(status < 0) {
printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08lx, %08lx)\n",
printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08x, %08x)\n",
__FILE__, card->mmio_region.start, card->mmio_region.end);
}
return 0;
}
static struct parisc_device_id hppb_tbl[] = {
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, 0x500, 0xc },
{ HPHW_BCPORT, HVERSION_REV_ANY_ID, 0x500, 0xc }, /* E25 and K */
{ HPHW_BCPORT, 0x0, 0x501, 0xc }, /* E35 */
{ HPHW_BCPORT, 0x0, 0x502, 0xc }, /* E45 */
{ HPHW_BCPORT, 0x0, 0x503, 0xc }, /* E55 */
{ 0, }
};

View File

@ -132,7 +132,7 @@ struct iosapic_irt {
struct vector_info {
struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
struct irt_entry *irte; /* IRT entry */
u32 *eoi_addr; /* precalculate EOI reg address */
u32 __iomem *eoi_addr; /* precalculate EOI reg address */
u32 eoi_data; /* IA64: ? PA: swapped txn_data */
int txn_irq; /* virtual IRQ number for processor */
ulong txn_addr; /* IA64: id_eid PA: partial HPA */
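/*
 * Illustrative sketch (not from this patch): the __iomem annotation above
 * is a sparse hint that eoi_addr points at memory-mapped I/O and must only
 * be touched through the MMIO accessors, never by a plain dereference.
 * Assumes the vector_info definition above; the helper name is invented.
 */
#include <asm/io.h>

static void example_eoi(struct vector_info *vi)
{
	__raw_writel(vi->eoi_data, vi->eoi_addr);
}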

View File

@ -168,7 +168,8 @@ lba_dump_res(struct resource *r, int d)
printk(KERN_DEBUG "(%p)", r->parent);
for (i = d; i ; --i) printk(" ");
printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r, r->start, r->end, r->flags);
printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
(long)r->start, (long)r->end, r->flags);
lba_dump_res(r->child, d+2);
lba_dump_res(r->sibling, d);
}
@ -647,7 +648,7 @@ truncate_pat_collision(struct resource *root, struct resource *new)
printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
"to [%lx,%lx]\n",
start, end,
new->start, new->end );
(long)new->start, (long)new->end );
return 0; /* truncation successful */
}
@ -715,8 +716,8 @@ lba_fixup_bus(struct pci_bus *bus)
printk("FAILED: lba_fixup_bus() request for "
"elmmio_space [%lx/%lx]\n",
ldev->hba.elmmio_space.start,
ldev->hba.elmmio_space.end);
(long)ldev->hba.elmmio_space.start,
(long)ldev->hba.elmmio_space.end);
/* lba_dump_res(&iomem_resource, 2); */
/* BUG(); */
@ -738,15 +739,15 @@ lba_fixup_bus(struct pci_bus *bus)
&(ldev->hba.lmmio_space))) {
printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
ldev->hba.lmmio_space.start,
ldev->hba.lmmio_space.end);
(long)ldev->hba.lmmio_space.start,
(long)ldev->hba.lmmio_space.end);
} else {
err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
if (err < 0) {
printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
"lmmio_space [%lx/%lx]\n",
ldev->hba.lmmio_space.start,
ldev->hba.lmmio_space.end);
(long)ldev->hba.lmmio_space.start,
(long)ldev->hba.lmmio_space.end);
} else
bus->resource[i++] = &(ldev->hba.lmmio_space);
}
@ -758,8 +759,8 @@ lba_fixup_bus(struct pci_bus *bus)
if (err < 0) {
printk("FAILED: lba_fixup_bus() request for "
"gmmio_space [%lx/%lx]\n",
ldev->hba.gmmio_space.start,
ldev->hba.gmmio_space.end);
(long)ldev->hba.gmmio_space.start,
(long)ldev->hba.gmmio_space.end);
lba_dump_res(&iomem_resource, 2);
BUG();
}
@ -980,7 +981,7 @@ LBA_PORT_IN(32, 0)
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
void *where = (void *) PIOP_TO_GMMIO(LBA_DEV(l), addr); \
void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
WRITE_REG##size(val, where); \
/* flush the I/O down to the elroy at least */ \
@ -1063,16 +1064,16 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
/* used to fix up pre-initialized MEM BARs */
if (!lba_dev->hba.lmmio_space.start) {
sprintf(lba_dev->hba.lmmio_name,
"PCI%02lx LMMIO",
lba_dev->hba.bus_num.start);
"PCI%02x LMMIO",
(int)lba_dev->hba.bus_num.start);
lba_dev->hba.lmmio_space_offset = p->start -
io->start;
r = &lba_dev->hba.lmmio_space;
r->name = lba_dev->hba.lmmio_name;
} else if (!lba_dev->hba.elmmio_space.start) {
sprintf(lba_dev->hba.elmmio_name,
"PCI%02lx ELMMIO",
lba_dev->hba.bus_num.start);
"PCI%02x ELMMIO",
(int)lba_dev->hba.bus_num.start);
r = &lba_dev->hba.elmmio_space;
r->name = lba_dev->hba.elmmio_name;
} else {
@ -1089,8 +1090,8 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
case PAT_GMMIO:
/* MMIO space > 4GB phys addr; for 64-bit BAR */
sprintf(lba_dev->hba.gmmio_name, "PCI%02lx GMMIO",
lba_dev->hba.bus_num.start);
sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
(int)lba_dev->hba.bus_num.start);
r = &lba_dev->hba.gmmio_space;
r->name = lba_dev->hba.gmmio_name;
r->start = p->start;
@ -1112,8 +1113,8 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
*/
lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);
sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
lba_dev->hba.bus_num.start);
sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
(int)lba_dev->hba.bus_num.start);
r = &lba_dev->hba.io_space;
r->name = lba_dev->hba.io_name;
r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
@ -1166,8 +1167,8 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
** Legacy boxes but it's nice to see in /proc/iomem.
*/
r = &(lba_dev->hba.lmmio_space);
sprintf(lba_dev->hba.lmmio_name, "PCI%02lx LMMIO",
lba_dev->hba.bus_num.start);
sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
(int)lba_dev->hba.bus_num.start);
r->name = lba_dev->hba.lmmio_name;
#if 1
@ -1275,8 +1276,8 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
** an existing (but unused portion of) distributed range.
*/
r = &(lba_dev->hba.elmmio_space);
sprintf(lba_dev->hba.elmmio_name, "PCI%02lx ELMMIO",
lba_dev->hba.bus_num.start);
sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
(int)lba_dev->hba.bus_num.start);
r->name = lba_dev->hba.elmmio_name;
#if 1
@ -1297,8 +1298,8 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
#endif
r = &(lba_dev->hba.io_space);
sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
lba_dev->hba.bus_num.start);
sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
(int)lba_dev->hba.bus_num.start);
r->name = lba_dev->hba.io_name;
r->flags = IORESOURCE_IO;
r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
@ -1406,13 +1407,20 @@ lba_hw_init(struct lba_device *d)
return 0;
}
/*
* Unfortunately, when firmware numbers busses, it doesn't take into account
* Cardbus bridges. So we have to renumber the busses to suit ourselves.
* Elroy/Mercury don't actually know what bus number they're attached to;
* we use bus 0 to indicate the directly attached bus and any other bus
* number will be taken care of by the PCI-PCI bridge.
*/
static unsigned int lba_next_bus = 0;
/*
** Determine if lba should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
* Determine if lba should claim this chip (return 0) or not (return 1).
* If so, initialize the chip and tell other partners in crime they
* have work to do.
*/
static int __init
lba_driver_probe(struct parisc_device *dev)
{
@ -1440,7 +1448,7 @@ lba_driver_probe(struct parisc_device *dev)
}
printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
version, func_class & 0xf, dev->hpa.start);
version, func_class & 0xf, (long)dev->hpa.start);
if (func_class < 2) {
printk(KERN_WARNING "Can't support LBA older than "
@ -1470,17 +1478,16 @@ lba_driver_probe(struct parisc_device *dev)
*/
printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
minor, func_class, dev->hpa.start);
minor, func_class, (long)dev->hpa.start);
cfg_ops = &mercury_cfg_ops;
} else {
printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa.start);
printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
(long)dev->hpa.start);
return -ENODEV;
}
/*
** Tell I/O SAPIC driver we have a IRQ handler/region.
*/
	/* Tell I/O SAPIC driver we have an IRQ handler/region. */
tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);
/* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't
@ -1529,16 +1536,17 @@ lba_driver_probe(struct parisc_device *dev)
lba_legacy_resources(dev, lba_dev);
}
/*
** Tell PCI support another PCI bus was found.
** Walks PCI bus for us too.
*/
if (lba_dev->hba.bus_num.start < lba_next_bus)
lba_dev->hba.bus_num.start = lba_next_bus;
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
cfg_ops, NULL);
if (lba_bus)
if (lba_bus) {
lba_next_bus = lba_bus->subordinate + 1;
pci_bus_add_devices(lba_bus);
}
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
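/*
 * Illustrative sketch (not from this patch): most printk changes in this
 * file cast resource members before printing, because resource start/end
 * fields can differ in width between 32-bit and 64-bit builds. The helper
 * below is invented; it only shows the casting pattern.
 */
#include <linux/kernel.h>
#include <linux/ioport.h>

static void example_dump_res(struct resource *r)
{
	printk(KERN_DEBUG "%s: [%lx-%lx] flags %lx\n",
	       r->name ? r->name : "?",
	       (long)r->start, (long)r->end, r->flags);
}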

View File

@ -66,8 +66,8 @@ static char lcd_text_default[32] __read_mostly;
static struct workqueue_struct *led_wq;
static void led_work_func(void *);
static DECLARE_WORK(led_task, led_work_func, NULL);
static void led_work_func(struct work_struct *);
static DECLARE_DELAYED_WORK(led_task, led_work_func);
#if 0
#define DPRINTK(x) printk x
@ -136,7 +136,7 @@ static int start_task(void)
/* Create the work queue and queue the LED task */
led_wq = create_singlethread_workqueue("led_wq");
queue_work(led_wq, &led_task);
queue_delayed_work(led_wq, &led_task, 0);
return 0;
}
@ -441,7 +441,7 @@ static __inline__ int led_get_diskio_activity(void)
#define LED_UPDATE_INTERVAL (1 + (HZ*19/1000))
static void led_work_func (void *unused)
static void led_work_func (struct work_struct *unused)
{
static unsigned long last_jiffies;
static unsigned long count_HZ; /* counter in range 0..HZ */
@ -588,7 +588,7 @@ int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long d
/* Ensure the work is queued */
if (led_wq) {
queue_work(led_wq, &led_task);
queue_delayed_work(led_wq, &led_task, 0);
}
return 0;
@ -629,7 +629,7 @@ void __init register_led_regions(void)
** avoid a race condition while writing the CMD/DATA register pair.
**
*/
int lcd_print( char *str )
int lcd_print( const char *str )
{
int i;
@ -658,7 +658,7 @@ int lcd_print( char *str )
/* re-queue the work */
if (led_wq) {
queue_work(led_wq, &led_task);
queue_delayed_work(led_wq, &led_task, 0);
}
return lcd_info.lcd_width;
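/*
 * Illustrative sketch (not from this patch): led.c is converted to the
 * workqueue API where callbacks take a struct work_struct * and periodic
 * work uses a delayed_work item. All names and the poll interval below
 * are invented.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/errno.h>

static void example_work_func(struct work_struct *unused);
static DECLARE_DELAYED_WORK(example_task, example_work_func);
static struct workqueue_struct *example_wq;

static void example_work_func(struct work_struct *unused)
{
	/* ... do the periodic job, then re-arm ... */
	queue_delayed_work(example_wq, &example_task, HZ / 10);
}

static int __init example_start(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	queue_delayed_work(example_wq, &example_task, 0);
	return 0;
}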

View File

@ -2,7 +2,7 @@
* linux/drivers/parisc/power.c
* HP PARISC soft power switch support driver
*
* Copyright (c) 2001-2005 Helge Deller <deller@gmx.de>
* Copyright (c) 2001-2007 Helge Deller <deller@gmx.de>
* All rights reserved.
*
*
@ -29,7 +29,6 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
*
*
*
* HINT:
* Support of the soft power switch button may be enabled or disabled at
* runtime through the "/proc/sys/kernel/power" procfs entry.
@ -38,34 +37,28 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <asm/pdc.h>
#include <asm/io.h>
#include <asm/led.h>
#include <asm/uaccess.h>
#define DRIVER_NAME "powersw"
#define KTHREAD_NAME "kpowerswd"
#ifdef DEBUG
# define DPRINTK(x...) printk(x)
#else
# define DPRINTK(x...)
#endif
/* filename in /proc which can be used to enable/disable the power switch */
#define SYSCTL_FILENAME "sys/kernel/power"
/* how often should the power button be polled ? */
#define POWERSWITCH_POLL_PER_SEC 2
/* how long does the power button needs to be down until we react ? */
#define POWERSWITCH_DOWN_SEC 2
/* assembly code to access special registers */
/* taken from PCXL ERS page 82 */
#define DIAG_CODE(code) (0x14000000 + ((code)<<5))
/* this will go to processor.h or any other place... */
/* taken from PCXL ERS page 82 */
#define MFCPU_X(rDiagReg, t_ch, t_th, code) \
(DIAG_CODE(code) + ((rDiagReg)<<21) + ((t_ch)<<16) + ((t_th)<<0) )
@ -76,111 +69,95 @@
#define __getDIAG(dr) ( { \
register unsigned long __res asm("r28");\
__asm__ __volatile__ ( \
".word %1\n nop\n" : "=&r" (__res) : "i" (MFCPU_T(dr,28)) \
".word %1" : "=&r" (__res) : "i" (MFCPU_T(dr,28) ) \
); \
__res; \
} )
static void deferred_poweroff(void *dummy)
{
if (kill_cad_pid(SIGINT, 1)) {
/* just in case killing init process failed */
machine_power_off();
}
}
/*
* This function gets called from interrupt context.
* As it's called within an interrupt, it wouldn't sync if we don't
* use schedule_work().
*/
static DECLARE_WORK(poweroff_work, deferred_poweroff, NULL);
static void poweroff(void)
{
static int powering_off __read_mostly;
if (powering_off)
return;
powering_off++;
schedule_work(&poweroff_work);
}
/* local time-counter for shutdown */
/* local shutdown counter */
static int shutdown_timer __read_mostly;
/* check, give feedback and start shutdown after one second */
static void process_shutdown(void)
{
if (shutdown_timer == 0)
DPRINTK(KERN_INFO "Shutdown requested...\n");
printk(KERN_ALERT KTHREAD_NAME ": Shutdown requested...\n");
shutdown_timer++;
/* wait until the button was pressed for 1 second */
if (shutdown_timer == HZ) {
#if defined (DEBUG) || defined(CONFIG_CHASSIS_LCD_LED)
static char msg[] = "Shutting down...";
#endif
DPRINTK(KERN_INFO "%s\n", msg);
if (shutdown_timer == (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC)) {
static const char msg[] = "Shutting down...";
printk(KERN_INFO KTHREAD_NAME ": %s\n", msg);
lcd_print(msg);
poweroff();
/* send kill signal */
if (kill_cad_pid(SIGINT, 1)) {
/* just in case killing init process failed */
if (pm_power_off)
pm_power_off();
}
}
}
/* main power switch tasklet struct (scheduled from time.c) */
DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0);
/* main power switch task struct */
static struct task_struct *power_task;
/* filename in /proc which can be used to enable/disable the power switch */
#define SYSCTL_FILENAME "sys/kernel/power"
/* soft power switch enabled/disabled */
int pwrsw_enabled __read_mostly = 1;
/*
* On gecko style machines (e.g. 712/xx and 715/xx)
* the power switch status is stored in Bit 0 ("the highest bit")
* of CPU diagnose register 25.
*
*/
static void gecko_tasklet_func(unsigned long unused)
/* main kernel thread worker. It polls the button state */
static int kpowerswd(void *param)
{
if (unlikely(!pwrsw_enabled))
return;
__set_current_state(TASK_RUNNING);
if (__getDIAG(25) & 0x80000000) {
/* power switch button not pressed or released again */
/* Warning: Some machines do never reset this DIAG flag! */
shutdown_timer = 0;
} else {
process_shutdown();
}
}
do {
int button_not_pressed;
unsigned long soft_power_reg = (unsigned long) param;
schedule_timeout_interruptible(pwrsw_enabled ? HZ : HZ/POWERSWITCH_POLL_PER_SEC);
__set_current_state(TASK_RUNNING);
if (unlikely(!pwrsw_enabled))
continue;
if (soft_power_reg) {
/*
* Non-Gecko-style machines:
* Check the power switch status which is read from the
* real I/O location at soft_power_reg.
* Bit 31 ("the lowest bit") is the status of the power switch.
* This bit is "1" if the button is NOT pressed.
*/
button_not_pressed = (gsc_readl(soft_power_reg) & 0x1);
} else {
/*
* On gecko style machines (e.g. 712/xx and 715/xx)
* the power switch status is stored in Bit 0 ("the highest bit")
* of CPU diagnose register 25.
* Warning: Some machines never reset the DIAG flag, even if
* the button has been released again.
*/
button_not_pressed = (__getDIAG(25) & 0x80000000);
}
if (likely(button_not_pressed)) {
if (unlikely(shutdown_timer && /* avoid writing if not necessary */
shutdown_timer < (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC))) {
shutdown_timer = 0;
printk(KERN_INFO KTHREAD_NAME ": Shutdown request aborted.\n");
}
} else
process_shutdown();
} while (!kthread_should_stop());
/*
* Check the power switch status which is read from the
* real I/O location at soft_power_reg.
* Bit 31 ("the lowest bit") is the status of the power switch.
*/
static void polling_tasklet_func(unsigned long soft_power_reg)
{
unsigned long current_status;
if (unlikely(!pwrsw_enabled))
return;
current_status = gsc_readl(soft_power_reg);
if (current_status & 0x1) {
/* power switch button not pressed */
shutdown_timer = 0;
} else {
process_shutdown();
}
return 0;
}
@ -220,7 +197,7 @@ static struct notifier_block parisc_panic_block = {
static int __init power_init(void)
{
unsigned long ret;
unsigned long soft_power_reg = 0;
unsigned long soft_power_reg;
#if 0
request_irq( IRQ_FROM_REGION(CPU_IRQ_REGION)+2, &powerfail_interrupt,
@ -235,44 +212,44 @@ static int __init power_init(void)
soft_power_reg = -1UL;
switch (soft_power_reg) {
case 0: printk(KERN_INFO "Gecko-style soft power switch enabled.\n");
power_tasklet.func = gecko_tasklet_func;
case 0: printk(KERN_INFO DRIVER_NAME ": Gecko-style soft power switch enabled.\n");
break;
case -1UL: printk(KERN_INFO "Soft power switch support not available.\n");
case -1UL: printk(KERN_INFO DRIVER_NAME ": Soft power switch support not available.\n");
return -ENODEV;
default: printk(KERN_INFO "Soft power switch enabled, polling @ 0x%08lx.\n",
default: printk(KERN_INFO DRIVER_NAME ": Soft power switch at 0x%08lx enabled.\n",
soft_power_reg);
power_tasklet.data = soft_power_reg;
power_tasklet.func = polling_tasklet_func;
}
power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME);
if (IS_ERR(power_task)) {
printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n");
pdc_soft_power_button(0);
return -EIO;
}
/* Register a call for panic conditions. */
atomic_notifier_chain_register(&panic_notifier_list,
&parisc_panic_block);
tasklet_enable(&power_tasklet);
return 0;
}
static void __exit power_exit(void)
{
if (!power_tasklet.func)
return;
kthread_stop(power_task);
tasklet_disable(&power_tasklet);
atomic_notifier_chain_unregister(&panic_notifier_list,
&parisc_panic_block);
power_tasklet.func = NULL;
pdc_soft_power_button(0);
}
module_init(power_init);
arch_initcall(power_init);
module_exit(power_exit);
MODULE_AUTHOR("Helge Deller");
MODULE_AUTHOR("Helge Deller <deller@gmx.de>");
MODULE_DESCRIPTION("Soft power switch driver");
MODULE_LICENSE("Dual BSD/GPL");
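/*
 * Illustrative sketch (not from this patch): power.c replaces the old
 * tasklet with a polling kernel thread. This is the bare
 * kthread_run()/kthread_should_stop() pattern used above; the thread
 * name and poll rate are arbitrary.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/err.h>

static struct task_struct *example_task;

static int example_poll_fn(void *data)
{
	do {
		schedule_timeout_interruptible(HZ / 2);
		__set_current_state(TASK_RUNNING);
		/* ... sample the hardware state here ... */
	} while (!kthread_should_stop());
	return 0;
}

static int __init example_init(void)
{
	example_task = kthread_run(example_poll_fn, NULL, "kexampled");
	return IS_ERR(example_task) ? PTR_ERR(example_task) : 0;
}

static void __exit example_exit(void)
{
	kthread_stop(example_task);
}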

View File

@ -109,7 +109,7 @@ static unsigned long piranha_bad_128k = 0;
#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 1);
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
@ -846,7 +846,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
if (!hwdev) {
/* only support PCI */
*dma_handle = 0;
return 0;
return NULL;
}
ret = (void *) __get_free_pages(gfp, get_order(size));
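/*
 * Context, not part of this patch: the module_param() fix above matters
 * because the third argument is a sysfs permission mask, not a default
 * value; 0444 makes the parameter world-readable under /sys/module/.
 * The parameter name below is invented.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_reserve = 1;
module_param(example_reserve, int, 0444);	/* read-only in sysfs */
MODULE_PARM_DESC(example_reserve, "Reserve half of the IO pdir");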

View File

@ -51,7 +51,11 @@
#define MUX_NR 256
static unsigned int port_cnt __read_mostly;
static struct uart_port mux_ports[MUX_NR];
struct mux_port {
struct uart_port port;
int enabled;
};
static struct mux_port mux_ports[MUX_NR];
static struct uart_driver mux_driver = {
.owner = THIS_MODULE,
@ -66,7 +70,36 @@ static struct timer_list mux_timer;
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
#define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8
/**
* get_mux_port_count - Get the number of available ports on the Mux.
* @dev: The parisc device.
*
* This function is used to determine the number of ports the Mux
* supports. The IODC data reports the number of ports the Mux
* can support, but there are cases where not all the Mux ports
* are connected. This function can override the IODC and
* return the true port count.
*/
static int __init get_mux_port_count(struct parisc_device *dev)
{
int status;
u8 iodc_data[32];
unsigned long bytecnt;
/* If this is the built-in Mux for the K-Class (Eole CAP/MUX),
* we only need to allocate resources for 1 port since the
* other 7 ports are not connected.
*/
if(dev->id.hversion == 0x15)
return 1;
status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
BUG_ON(status != PDC_OK);
/* Return the number of ports specified in the iodc data. */
return ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8;
}
/**
* mux_tx_empty - Check if the transmitter fifo is empty.
@ -250,7 +283,7 @@ static void mux_read(struct uart_port *port)
*/
static int mux_startup(struct uart_port *port)
{
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
mux_ports[port->line].enabled = 1;
return 0;
}
@ -262,6 +295,7 @@ static int mux_startup(struct uart_port *port)
*/
static void mux_shutdown(struct uart_port *port)
{
mux_ports[port->line].enabled = 0;
}
/**
@ -319,7 +353,7 @@ static int mux_request_port(struct uart_port *port)
* @port: Ptr to the uart_port.
* @type: Bitmask of required configurations.
*
* Perform any autoconfiguration steps for the port. This functino is
* Perform any autoconfiguration steps for the port. This function is
* called if the UPF_BOOT_AUTOCONF flag is specified for the port.
* [Note: This is required for now because of a bug in the Serial core.
* rmk has already submitted a patch to linus, should be available for
@ -357,11 +391,11 @@ static void mux_poll(unsigned long unused)
int i;
for(i = 0; i < port_cnt; ++i) {
if(!mux_ports[i].info)
if(!mux_ports[i].enabled)
continue;
mux_read(&mux_ports[i]);
mux_write(&mux_ports[i]);
mux_read(&mux_ports[i].port);
mux_write(&mux_ports[i].port);
}
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
@ -371,8 +405,17 @@ static void mux_poll(unsigned long unused)
#ifdef CONFIG_SERIAL_MUX_CONSOLE
static void mux_console_write(struct console *co, const char *s, unsigned count)
{
while(count--)
pdc_iodc_putc(*s++);
/* Wait until the FIFO drains. */
while(UART_GET_FIFO_CNT(&mux_ports[0].port))
udelay(1);
while(count--) {
if(*s == '\n') {
UART_PUT_CHAR(&mux_ports[0].port, '\r');
}
UART_PUT_CHAR(&mux_ports[0].port, *s++);
}
}
static int mux_console_setup(struct console *co, char *options)
@ -428,19 +471,14 @@ static struct uart_ops mux_pops = {
*/
static int __init mux_probe(struct parisc_device *dev)
{
int i, status, ports;
u8 iodc_data[32];
unsigned long bytecnt;
struct uart_port *port;
int i, status;
status = pdc_iodc_read(&bytecnt, dev->hpa.start, 0, iodc_data, 32);
if(status != PDC_OK) {
printk(KERN_ERR "Serial mux: Unable to read IODC.\n");
return 1;
}
int port_count = get_mux_port_count(dev);
printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.6\n", port_count);
ports = GET_MUX_PORTS(iodc_data);
printk(KERN_INFO "Serial mux driver (%d ports) Revision: 0.3\n", ports);
dev_set_drvdata(&dev->dev, (void *)(long)port_count);
request_mem_region(dev->hpa.start + MUX_OFFSET,
port_count * MUX_LINE_OFFSET, "Mux");
if(!port_cnt) {
mux_driver.cons = MUX_CONSOLE;
@ -450,13 +488,10 @@ static int __init mux_probe(struct parisc_device *dev)
printk(KERN_ERR "Serial mux: Unable to register driver.\n");
return 1;
}
init_timer(&mux_timer);
mux_timer.function = mux_poll;
}
for(i = 0; i < ports; ++i, ++port_cnt) {
port = &mux_ports[port_cnt];
for(i = 0; i < port_count; ++i, ++port_cnt) {
struct uart_port *port = &mux_ports[port_cnt].port;
port->iobase = 0;
port->mapbase = dev->hpa.start + MUX_OFFSET +
(i * MUX_LINE_OFFSET);
@ -477,27 +512,73 @@ static int __init mux_probe(struct parisc_device *dev)
*/
port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
BUG_ON(status);
}
#ifdef CONFIG_SERIAL_MUX_CONSOLE
register_console(&mux_console);
#endif
return 0;
}
static int __devexit mux_remove(struct parisc_device *dev)
{
int i, j;
int port_count = (long)dev_get_drvdata(&dev->dev);
/* Find Port 0 for this card in the mux_ports list. */
for(i = 0; i < port_cnt; ++i) {
if(mux_ports[i].port.mapbase == dev->hpa.start + MUX_OFFSET)
break;
}
BUG_ON(i + port_count > port_cnt);
/* Release the resources associated with each port on the device. */
for(j = 0; j < port_count; ++j, ++i) {
struct uart_port *port = &mux_ports[i].port;
uart_remove_one_port(&mux_driver, port);
if(port->membase)
iounmap(port->membase);
}
release_mem_region(dev->hpa.start + MUX_OFFSET, port_count * MUX_LINE_OFFSET);
return 0;
}
/* Hack. This idea was taken from 8250_gsc.c on how to order the
* serial port detection properly. The idea is we always want the
* built-in mux to be detected before add-in mux cards, so we
* specifically probe for the built-in mux cards first.
*
* This table only contains the parisc_device_id of known builtin mux
* devices. All other mux cards will be detected by the generic mux_tbl.
*/
static struct parisc_device_id builtin_mux_tbl[] = {
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x15, 0x0000D }, /* All K-class */
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, 0x44, 0x0000D }, /* E35, E45, and E55 */
{ 0, }
};
static struct parisc_device_id mux_tbl[] = {
{ HPHW_A_DIRECT, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0000D },
{ 0, }
};
MODULE_DEVICE_TABLE(parisc, builtin_mux_tbl);
MODULE_DEVICE_TABLE(parisc, mux_tbl);
static struct parisc_driver builtin_serial_mux_driver = {
.name = "builtin_serial_mux",
.id_table = builtin_mux_tbl,
.probe = mux_probe,
.remove = __devexit_p(mux_remove),
};
static struct parisc_driver serial_mux_driver = {
.name = "serial_mux",
.id_table = mux_tbl,
.probe = mux_probe,
.remove = __devexit_p(mux_remove),
};
/**
@ -507,7 +588,21 @@ static struct parisc_driver serial_mux_driver = {
*/
static int __init mux_init(void)
{
return register_parisc_driver(&serial_mux_driver);
register_parisc_driver(&builtin_serial_mux_driver);
register_parisc_driver(&serial_mux_driver);
if(port_cnt > 0) {
/* Start the Mux timer */
init_timer(&mux_timer);
mux_timer.function = mux_poll;
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
register_console(&mux_console);
#endif
}
return 0;
}
/**
@ -517,14 +612,16 @@ static int __init mux_init(void)
*/
static void __exit mux_exit(void)
{
int i;
for (i = 0; i < port_cnt; i++) {
uart_remove_one_port(&mux_driver, &mux_ports[i]);
if (mux_ports[i].membase)
iounmap(mux_ports[i].membase);
/* Delete the Mux timer. */
if(port_cnt > 0) {
del_timer(&mux_timer);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
unregister_console(&mux_console);
#endif
}
unregister_parisc_driver(&builtin_serial_mux_driver);
unregister_parisc_driver(&serial_mux_driver);
uart_unregister_driver(&mux_driver);
}
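/*
 * Illustrative sketch (not from this patch): mux_probe() above stashes the
 * per-card port count in the device's driver data and mux_remove() reads
 * it back; casting through long keeps the int/pointer round-trip safe on
 * both 32-bit and 64-bit kernels. The helpers below are invented.
 */
#include <linux/device.h>

static void example_store_ports(struct device *dev, int ports)
{
	dev_set_drvdata(dev, (void *)(long)ports);
}

static int example_fetch_ports(struct device *dev)
{
	return (long)dev_get_drvdata(dev);
}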

View File

@ -31,9 +31,13 @@
#define STREGM std,ma
#define SHRREG shrd
#define SHLREG shld
#define ADDIB addib,*
#define CMPB cmpb,*
#define ANDCM andcm,*
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
#define STREG stw
@ -42,9 +46,13 @@
#define STREGM stwm
#define SHRREG shr
#define SHLREG shlw
#define ADDIB addib,
#define CMPB cmpb,
#define ANDCM andcm
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128
#define ASM_ULONG_INSN .word
#endif
#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)
@ -65,7 +73,7 @@
#ifdef __ASSEMBLY__
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
* work around that for now... */
.level 2.0w
@ -156,7 +164,7 @@
.endm
.macro loadgp
#ifdef __LP64__
#ifdef CONFIG_64BIT
ldil L%__gp, %r27
ldo R%__gp(%r27), %r27
#else
@ -334,7 +342,7 @@
fldd,mb -8(%r30), %fr12
.endm
#ifdef __LP64__
#ifdef CONFIG_64BIT
.macro callee_save
std,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
mfctl %cr27, %r3
@ -377,7 +385,7 @@
ldd,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
.endm
#else /* ! __LP64__ */
#else /* ! CONFIG_64BIT */
.macro callee_save
stw,ma %r3, CALLEE_REG_FRAME_SIZE(%r30)
@ -420,7 +428,7 @@
mtctl %r3, %cr27
ldw,mb -CALLEE_REG_FRAME_SIZE(%r30), %r3
.endm
#endif /* ! __LP64__ */
#endif /* ! CONFIG_64BIT */
.macro save_specials regs
@ -441,7 +449,7 @@
mtctl %r0, %cr18
SAVE_CR (%cr18, PT_IAOQ1(\regs))
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* cr11 (sar) is a funny one. 5 bits on PA1.1 and 6 bit on PA2.0
* For PA2.0 mtsar or mtctl always write 6 bits, but mfctl only
* reads 5 bits. Use mfctl,w to read all six bits. Otherwise

View File

@ -58,7 +58,7 @@ extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
@ -67,7 +67,7 @@ static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
switch(size) {
#ifdef __LP64__
#ifdef CONFIG_64BIT
case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
case 4: return __xchg32((int) x, (int *) ptr);
@ -81,7 +81,7 @@ __xchg(unsigned long x, __volatile__ void * ptr, int size)
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
@ -105,7 +105,7 @@ static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch(size) {
#ifdef __LP64__
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
@ -218,7 +218,7 @@ static __inline__ int atomic_read(const atomic_t *v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#ifdef __LP64__
#ifdef CONFIG_64BIT
typedef struct { volatile s64 counter; } atomic64_t;
@ -270,7 +270,7 @@ atomic64_read(const atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
#include <asm-generic/atomic.h>
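/*
 * Illustrative sketch (not from this patch): __xchg()/__cmpxchg() above
 * dispatch on the operand size, with the 8-byte case only compiled in on
 * 64-bit kernels. Callers normally go through the xchg()/cmpxchg()
 * wrappers defined in this header; the flag word below is invented.
 */
#include <asm/atomic.h>

static unsigned long example_flag;

static void example_usage(void)
{
	unsigned long old;

	old = xchg(&example_flag, 1UL);		/* unconditional swap */

	if (cmpxchg(&example_flag, 1UL, 2UL) == 1UL) {
		/* we were the one to move it from 1 to 2 */
	}

	(void)old;
}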

View File

@ -60,31 +60,37 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr)
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long oldbit;
unsigned long old;
unsigned long flags;
int set;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
oldbit = *addr;
*addr = oldbit | mask;
old = *addr;
set = (old & mask) ? 1 : 0;
if (!set)
*addr = old | mask;
_atomic_spin_unlock_irqrestore(addr, flags);
return (oldbit & mask) ? 1 : 0;
return set;
}
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
unsigned long oldbit;
unsigned long old;
unsigned long flags;
int set;
addr += (nr >> SHIFT_PER_LONG);
_atomic_spin_lock_irqsave(addr, flags);
oldbit = *addr;
*addr = oldbit & ~mask;
old = *addr;
set = (old & mask) ? 1 : 0;
if (set)
*addr = old & ~mask;
_atomic_spin_unlock_irqrestore(addr, flags);
return (oldbit & mask) ? 1 : 0;
return set;
}
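/*
 * Context, not part of this patch: the change above avoids dirtying the
 * cache line when the bit already has the requested value; callers are
 * unaffected. A hedged usage example with an invented bit number:
 */
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_BUSY	0	/* invented bit number */

static unsigned long example_flags;

static int example_try_claim(void)
{
	/* Non-zero return means the bit was already set, i.e. someone
	 * else owns the resource; zero means we just claimed it. */
	if (test_and_set_bit(EXAMPLE_BUSY, &example_flags))
		return -EBUSY;
	return 0;
}

static void example_release(void)
{
	clear_bit(EXAMPLE_BUSY, &example_flags);
}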
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
@ -130,7 +136,7 @@ static __inline__ unsigned long __ffs(unsigned long x)
unsigned long ret;
__asm__(
#ifdef __LP64__
#ifdef CONFIG_64BIT
" ldi 63,%1\n"
" extrd,u,*<> %0,63,32,%%r0\n"
" extrd,u,*TR %0,31,32,%0\n" /* move top 32-bits down */

View File

@ -1,14 +1,92 @@
#ifndef _PARISC_BUG_H
#define _PARISC_BUG_H
/*
* Tell the user there is some problem.
* The offending file and line are encoded in the __bug_table section.
*/
#ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
dump_stack(); \
panic("BUG!"); \
} while (0)
#define HAVE_ARCH_WARN_ON
/* the break instruction is used as BUG() marker. */
#define PARISC_BUG_BREAK_ASM "break 0x1f, 0x1fff"
#define PARISC_BUG_BREAK_INSN 0x03ffe01f /* PARISC_BUG_BREAK_ASM */
#if defined(CONFIG_64BIT)
#define ASM_WORD_INSN ".dword\t"
#else
#define ASM_WORD_INSN ".word\t"
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define BUG() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
for(;;) ; \
} while(0)
#else
#define BUG() \
do { \
asm volatile(PARISC_BUG_BREAK_ASM : : ); \
for(;;) ; \
} while(0)
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define __WARN() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t" ASM_WORD_INSN "1b, %c0\n" \
"\t.short %c1, %c2\n" \
"\t.org 2b+%c3\n" \
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#else
#define __WARN() \
do { \
asm volatile("\n" \
"1:\t" PARISC_BUG_BREAK_ASM "\n" \
"\t.pushsection __bug_table,\"a\"\n" \
"2:\t" ASM_WORD_INSN "1b\n" \
"\t.short %c0\n" \
"\t.org 2b+%c1\n" \
"\t.popsection" \
: : "i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry)) ); \
} while(0)
#endif
#define WARN_ON(x) ({ \
typeof(x) __ret_warn_on = (x); \
if (__builtin_constant_p(__ret_warn_on)) { \
if (__ret_warn_on) \
__WARN(); \
} else { \
if (unlikely(__ret_warn_on)) \
__WARN(); \
} \
unlikely(__ret_warn_on); \
})
#endif
#include <asm-generic/bug.h>
#endif
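/*
 * Illustrative sketch (not from this patch): the new bug.h emits a break
 * instruction and records file/line in the __bug_table section, and
 * asm-generic/bug.h layers BUG_ON()/WARN_ON_ONCE() on top. This is how
 * callers typically see it; the function and its arguments are invented.
 */
#include <linux/kernel.h>	/* pulls in asm/bug.h in this era */
#include <linux/errno.h>

static int example_consume(void *buf, int len)
{
	/* Impossible condition: stop the kernel with file/line info. */
	BUG_ON(buf == NULL);

	/* Suspicious but survivable: log a warning and carry on.
	 * This WARN_ON() implementation returns the tested condition. */
	if (WARN_ON(len < 0))
		return -EINVAL;

	return len;
}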

View File

@ -30,31 +30,11 @@
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
extern void flush_data_cache_local(void *); /* flushes local data-cache only */
extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
extern void flush_data_cache(void); /* flushes data-cache only (all processors) */
extern void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
extern void parisc_cache_init(void); /* initializes cache-flushing */
extern void flush_all_caches(void); /* flush everything (tlb & cache) */
extern int get_cache_info(char *);
extern void flush_user_icache_range_asm(unsigned long, unsigned long);
extern void flush_kernel_icache_range_asm(unsigned long, unsigned long);
extern void flush_user_dcache_range_asm(unsigned long, unsigned long);
extern void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
extern void flush_kernel_dcache_page_asm(void *);
extern void flush_kernel_icache_page(void *);
extern void disable_sr_hashing(void); /* turns off space register hashing */
extern void disable_sr_hashing_asm(int); /* low level support for above */
extern void free_sid(unsigned long);
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
void disable_sr_hashing(void); /* turns off space register hashing */
void free_sid(unsigned long);
unsigned long alloc_sid(void);
extern void flush_user_dcache_page(unsigned long);
extern void flush_user_icache_page(unsigned long);
struct seq_file;
extern void show_cache_info(struct seq_file *m);
@ -63,6 +43,7 @@ extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
void parisc_setup_cache_timing(void);
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));

View File

@ -2,60 +2,46 @@
#define _PARISC_CACHEFLUSH_H
#include <linux/mm.h>
#include <asm/cache.h> /* for flush_user_dcache_range_asm() proto */
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
/* Cache flush operations */
/* Internal implementation */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);
/* Cache flush operations */
void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
extern void flush_cache_all_local(void);
static inline void cacheflush_h_tmp_function(void *dummy)
{
flush_cache_all_local();
}
static inline void flush_cache_all(void)
{
on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
if ((end - start) < parisc_cache_flush_threshold)
flush_user_dcache_range_asm(start,end);
else
flush_data_cache();
}
static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
if ((end - start) < parisc_cache_flush_threshold)
flush_user_icache_range_asm(start,end);
else
flush_instruction_cache();
}
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) \
@ -63,9 +49,15 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_unlock(mapping) \
write_unlock_irq(&(mapping)->tree_lock)
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page); flush_kernel_icache_page(page_address(page)); } while (0)
#define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page(page); \
flush_kernel_icache_page(page_address(page)); \
} while (0)
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
#define flush_icache_range(s,e) do { \
flush_kernel_dcache_range_asm(s,e); \
flush_kernel_icache_range_asm(s,e); \
} while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
@ -80,118 +72,17 @@ do { \
memcpy(dst, src, len); \
} while (0)
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int sr3;
if (!vma->vm_mm->context) {
BUG();
return;
}
sr3 = mfsp(3);
if (vma->vm_mm->context == sr3) {
flush_user_dcache_range(start,end);
flush_user_icache_range(start,end);
} else {
flush_cache_all();
}
}
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
pte_t pte;
if(pgd_none(*pgd))
return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
return 0;
/* We cannot take the pte lock here: flush_cache_page is usually
* called with pte lock already held. Whereas flush_dcache_page
* takes flush_dcache_mmap_lock, which is lower in the hierarchy:
* the vma itself is secure, but the pte might come or go racily.
*/
pte = *pte_offset_map(pmd, addr);
/* But pte_unmap() does nothing on this architecture */
/* Filter out coincidental file entries and swap entries */
if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
return 0;
return pte_pfn(pte) == pfn;
}
/* Private function to flush a page from the cache of a non-current
* process. cr25 contains the Page Directory of the current user
* process; we're going to hijack both it and the user space %sr3 to
* temporarily make the non-current process current. We have to do
* this because cache flushing may cause a non-access tlb miss which
* the handlers have to fill in from the pgd of the non-current
* process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
unsigned long vmaddr)
{
/* save the current process space and pgd */
unsigned long space = mfsp(3), pgd = mfctl(25);
/* we don't mind taking interrupts since they may not
* do anything with user space, but we can't
* be preempted here */
preempt_disable();
/* make us current */
mtctl(__pa(vma->vm_mm->pgd), 25);
mtsp(vma->vm_mm->context, 3);
flush_user_dcache_page(vmaddr);
if(vma->vm_flags & VM_EXEC)
flush_user_icache_page(vmaddr);
/* put the old current process back */
mtsp(space, 3);
mtctl(pgd, 25);
preempt_enable();
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
if (likely(vma->vm_mm->context == mfsp(3))) {
flush_user_dcache_page(vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_page(vmaddr);
} else {
flush_user_cache_page_non_current(vma, vmaddr);
}
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
BUG_ON(!vma->vm_mm->context);
if (likely(translation_exists(vma, vmaddr, pfn)))
__flush_cache_page(vma, vmaddr);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (PageAnon(page))
flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
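/*
 * Context, not part of this patch: ARCH_HAS_FLUSH_ANON_PAGE and
 * ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE announce that this architecture
 * provides real implementations. Generic code of this era falls back to
 * empty stubs when they are absent; a hedged sketch of that pattern
 * (not copied verbatim from any particular header):
 */
#include <linux/mm.h>

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* nothing to do on fully coherent caches */
}
#endif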

View File

@ -236,7 +236,7 @@ int ccio_allocate_resource(const struct parisc_device *dev,
unsigned long min, unsigned long max, unsigned long align);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_request_resource(dev, res) request_resource(&iomem_resource, res)
#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align) \
allocate_resource(&iomem_resource, res, size, min, max, \
align, NULL, NULL)

View File

@ -220,7 +220,7 @@ typedef struct elf64_fdesc {
* macros, and then it includes fs/binfmt_elf.c to provide an alternate
* elf binary handler for 32 bit binaries (on the 64 bit kernel).
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32

View File

@ -1,19 +1,13 @@
#ifndef _PARISC_HARDWARE_H
#define _PARISC_HARDWARE_H
#include <linux/mod_devicetable.h>
#include <asm/pdc.h>
struct parisc_device_id {
unsigned char hw_type; /* 5 bits used */
unsigned char hversion_rev; /* 4 bits */
unsigned short hversion; /* 12 bits */
unsigned int sversion; /* 20 bits */
};
#define HWTYPE_ANY_ID 0xff
#define HVERSION_REV_ANY_ID 0xff
#define HVERSION_ANY_ID 0xffff
#define SVERSION_ANY_ID 0xffffffffU
#define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID
#define HVERSION_ANY_ID PA_HVERSION_ANY_ID
#define HVERSION_REV_ANY_ID PA_HVERSION_REV_ANY_ID
#define SVERSION_ANY_ID PA_SVERSION_ANY_ID
struct hp_hardware {
unsigned short hw_type:5; /* HPHW_xxx */

View File

@ -67,7 +67,7 @@ static inline unsigned long long gsc_readq(unsigned long addr)
{
unsigned long long ret;
#ifdef __LP64__
#ifdef CONFIG_64BIT
__asm__ __volatile__(
" ldda 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
@ -108,7 +108,7 @@ static inline void gsc_writel(unsigned int val, unsigned long addr)
static inline void gsc_writeq(unsigned long long val, unsigned long addr)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
__asm__ __volatile__(
" stda %0,0(%1)\n"
: : "r" (val), "r" (addr) );

View File

@ -31,7 +31,7 @@ void __init register_led_regions(void);
#ifdef CONFIG_CHASSIS_LCD_LED
/* writes a string to the LCD display (if possible on this h/w) */
int lcd_print(char *str);
int lcd_print(const char *str);
#else
#define lcd_print(str)
#endif

View File

@ -1,6 +1,28 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
/* Nothing to see here... */
#ifndef __ASM_PARISC_LINKAGE_H
#define __ASM_PARISC_LINKAGE_H
#ifndef __ALIGN
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"
#endif
/*
* In parisc assembly a semicolon marks a comment while an
* exclamation mark is used to separate independent lines.
*/
#define ENTRY(name) \
.export name !\
ALIGN !\
name:
#ifdef CONFIG_64BIT
#define ENDPROC(name) \
END(name)
#else
#define ENDPROC(name) \
.type name, @function !\
END(name)
#endif
#endif /* __ASM_PARISC_LINKAGE_H */

View File

@ -35,7 +35,7 @@ extern struct node_map_data node_data[];
#define PFNNID_MAP_MAX 512 /* support 512GB */
extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
#ifndef __LP64__
#ifndef CONFIG_64BIT
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
#else
/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */

View File

@ -3,7 +3,7 @@
/*
* This file contains the parisc architecture specific module code.
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr

View File

@ -13,15 +13,15 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad1;
#endif
__kernel_time_t msg_stime; /* last msgsnd time */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad2;
#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad3;
#endif
__kernel_time_t msg_ctime; /* last change time */

View File

@ -105,7 +105,7 @@ extern int npmem_ranges;
/* WARNING: The definitions below must match exactly to sizeof(pte_t)
* etc
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define BITS_PER_PTE_ENTRY 3
#define BITS_PER_PMD_ENTRY 2
#define BITS_PER_PGD_ENTRY 2
@ -127,7 +127,11 @@ extern int npmem_ranges;
/* This governs the relationship between virtual and physical addresses.
* If you alter it, make sure to take care of our various fixed mapping
* segments in fixmap.h */
#define __PAGE_OFFSET (0x10000000)
#ifdef CONFIG_64BIT
#define __PAGE_OFFSET (0x40000000) /* 1GB */
#else
#define __PAGE_OFFSET (0x10000000) /* 256MB */
#endif
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)

View File

@ -15,7 +15,7 @@ struct parisc_device {
unsigned int num_addrs; /* some devices have additional address ranges. */
unsigned long *addr; /* which will be stored here */
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* parms for pdc_pat_cell_module() call */
unsigned long pcell_loc; /* Physical Cell location */
unsigned long mod_index; /* PAT specific - Misc Module info */

View File

@ -341,7 +341,7 @@ struct pdc_model { /* for PDC_MODEL */
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
unsigned long
#ifdef __LP64__
#ifdef CONFIG_64BIT
cc_padW:32,
#endif
cc_alias: 4, /* alias boundaries for virtual addresses */
@ -357,7 +357,7 @@ struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
struct pdc_tlb_cf { /* for PDC_CACHE (I/D-TLB's) */
unsigned long tc_pad0:12, /* reserved */
#ifdef __LP64__
#ifdef CONFIG_64BIT
tc_padW:32,
#endif
tc_sh : 2, /* 0 = separate I/D-TLB, else shared I/D-TLB */
@ -445,7 +445,7 @@ struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
#endif /* !CONFIG_PA20 */
#ifdef __LP64__
#ifdef CONFIG_64BIT
struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
unsigned long entries_returned;
unsigned long entries_total;
@ -456,7 +456,7 @@ struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
unsigned int pages;
unsigned int reserved;
};
#endif /* __LP64__ */
#endif /* CONFIG_64BIT */
struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
unsigned long mod_addr;
@ -752,7 +752,7 @@ int pdc_get_initiator(struct hardware_path *, struct pdc_initiator *);
int pdc_tod_read(struct pdc_tod *tod);
int pdc_tod_set(unsigned long sec, unsigned long usec);
#ifdef __LP64__
#ifdef CONFIG_64BIT
int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
struct pdc_memory_table *tbl, unsigned long entries);
#endif

View File

@ -250,7 +250,7 @@ struct pdc_pat_pd_addr_map_entry {
#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
#define PAT_GET_MOD_PAGES(value)(((value) & 0xffffffUL)
#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
/*
@ -303,35 +303,6 @@ extern int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 va
*/
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
* ----------------------------------------------------------
* Bit 0 to 51 - conf_base_addr
* Bit 52 to 62 - reserved
* Bit 63 - endianess bit
********************************************************************/
#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
* ----------------------------------------------------
* Bit 0 to 7 - entity type
* 0 = central agent, 1 = processor,
* 2 = memory controller, 3 = system bus adapter,
* 4 = local bus adapter, 5 = processor bus converter,
* 6 = crossbar fabric connect, 7 = fabric interconnect,
* 8 to 254 reserved, 255 = unknown.
* Bit 8 to 15 - DVI
* Bit 16 to 23 - IOC functions
* Bit 24 to 39 - reserved
* Bit 40 to 63 - mod_pages
* number of 4K pages a module occupies starting at conf_base_addr
********************************************************************/
#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
#define PAT_GET_MOD_PAGES(value)(((value) & 0xffffffUL)
#endif /* __ASSEMBLY__ */
#endif /* ! __PARISC_PATPDC_H */

View File

@ -14,7 +14,7 @@
* Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
* allocate the first pmd adjacent to the pgd. This means that we can
* subtract a constant offset to get to it. The pmd and pgd sizes are
* arranged so that a single pmd covers 4GB (giving a full LP64
* arranged so that a single pmd covers 4GB (giving a full 64-bit
* process access to 8TB) so our lookups are effectively L2 for the
* first 4GB of the kernel (i.e. for all ILP32 processes and all the
* kernel for machines with under 4GB of memory) */
@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#ifdef __LP64__
#ifdef CONFIG_64BIT
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(pgd_t *pgd)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(pmd_t *pmd)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
@ -99,7 +99,7 @@ static inline void pmd_free(pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)

View File

@ -10,7 +10,6 @@
* we simulate an x86-style page table for the linux mm code
*/
#include <linux/spinlock.h>
#include <linux/mm.h> /* for vm_area_struct */
#include <asm/processor.h>
#include <asm/cache.h>

View File

@ -20,7 +20,7 @@ typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
/* Note these change from narrow to wide kernels */
#ifdef __LP64__
#ifdef CONFIG_64BIT
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;

View File

@ -9,13 +9,10 @@
#define __ASM_PARISC_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <asm/prefetch.h> /* lockdep.h needs <linux/prefetch.h> */
#include <linux/threads.h>
#include <linux/spinlock_types.h>
#include <asm/prefetch.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@ -41,7 +38,7 @@
#define DEFAULT_TASK_SIZE32 (0xFFF00000UL)
#define DEFAULT_MAP_BASE32 (0x40000000UL)
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define DEFAULT_TASK_SIZE (MAX_ADDRESS-0xf000000)
#define DEFAULT_MAP_BASE (0x200000000UL)
#else
@ -87,7 +84,6 @@ struct cpuinfo_parisc {
unsigned long hpa; /* Host Physical address */
unsigned long txn_addr; /* MMIO addr of EIR or id_eid */
#ifdef CONFIG_SMP
spinlock_t lock; /* synchronization for ipi's */
unsigned long pending_ipi; /* bitmap of type ipi_message_type */
unsigned long ipi_count; /* number ipi Interrupts */
#endif
@ -277,7 +273,7 @@ on downward growing arches, it looks like this:
* it in here from the current->personality
*/
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT))
#else
#define USER_WIDE_MODE 0

View File

@ -13,11 +13,11 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad1;
#endif
__kernel_time_t sem_otime; /* last semop time */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad2;
#endif
__kernel_time_t sem_ctime; /* last change time */

View File

@ -13,19 +13,19 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad1;
#endif
__kernel_time_t shm_atime; /* last attach time */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad2;
#endif
__kernel_time_t shm_dtime; /* last detach time */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad3;
#endif
__kernel_time_t shm_ctime; /* last change time */
#ifndef __LP64__
#ifndef CONFIG_64BIT
unsigned int __pad4;
#endif
size_t shm_segsz; /* size of segment (bytes) */
@ -36,7 +36,7 @@ struct shmid64_ds {
unsigned int __unused2;
};
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* The 'unsigned int' (formerly 'unsigned long') data types below will
* ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
* a wide kernel, but if some of these values are meant to contain pointers

View File

@ -105,7 +105,7 @@
struct siginfo;
/* Type of a signal handler. */
#ifdef __LP64__
#ifdef CONFIG_64BIT
/* function pointers on 64-bit parisc are pointers to little structs and the
* compiler doesn't support code which changes or tests the address of
* the function in the little struct. This is really ugly -PB

View File

@ -41,14 +41,6 @@ extern void smp_send_all_nop(void);
#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
#undef ENTRY_SYS_CPUS
#ifdef ENTRY_SYS_CPUS
#define STATE_RENDEZVOUS 0
#define STATE_STOPPED 1
#define STATE_RUNNING 2
#define STATE_HALTED 3
#endif
extern unsigned long cpu_present_mask;
#define raw_smp_processor_id() (current_thread_info()->cpu)

View File

@ -1,10 +1,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;

View File

@ -30,11 +30,11 @@ struct statfs {
struct statfs64 {
long f_type;
long f_bsize;
u64 f_blocks;
u64 f_bfree;
u64 f_bavail;
u64 f_files;
u64 f_ffree;
__u64 f_blocks;
__u64 f_bfree;
__u64 f_bavail;
__u64 f_files;
__u64 f_ffree;
__kernel_fsid_t f_fsid;
long f_namelen;
long f_frsize;

View File

@ -34,7 +34,7 @@ struct pa_psw {
unsigned int i:1;
};
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
#else
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))

View File

@ -62,6 +62,7 @@ struct thread_info {
#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 5 /* 32 bit binary */
#define TIF_MEMDIE 6
#define TIF_RESTORE_SIGMASK 7 /* restore saved signal mask */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@ -69,9 +70,10 @@ struct thread_info {
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
_TIF_NEED_RESCHED)
_TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
#endif /* __KERNEL__ */
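TIF_RESTORE_SIGMASK lets sigsuspend-style syscalls park the caller's old signal mask and have do_signal() restore it once the handler frame is set up. A minimal sketch of the usual idiom, not lifted from this patch ('newset' is assumed to hold the caller-supplied mask):

current->saved_sigmask = current->blocked;    /* stash the old mask */
spin_lock_irq(&current->sighand->siglock);
current->blocked = newset;                    /* install the temporary mask */
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();                                   /* sleep until a signal arrives */
set_thread_flag(TIF_RESTORE_SIGMASK);         /* do_signal() puts the old mask back */
return -ERESTARTNOHAND;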

@@ -73,33 +73,11 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
purge_tlb_end();
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
unsigned long npages;
void __flush_tlb_range(unsigned long sid,
unsigned long start, unsigned long end);
npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
while (npages--) {
pdtlb(start);
pitlb(start);
start += PAGE_SIZE;
}
} else {
while (npages--) {
pdtlb(start);
start += PAGE_SIZE;
}
}
purge_tlb_end();
}
}
#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
#endif
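Callers of the TLB flush helpers are untouched by the conversion; only what the macros expand to changes. A small illustration (the addresses are made up):

/* range flush within a process address space */
flush_tlb_range(vma, addr, addr + PAGE_SIZE);
        /* -> __flush_tlb_range((vma)->vm_mm->context, addr, addr + PAGE_SIZE) */
/* kernel range flush: space id 0 instead of dumping the whole TLB */
flush_tlb_kernel_range(start, end);
        /* -> __flush_tlb_range(0, start, end) rather than flush_tlb_all() */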

@@ -31,7 +31,7 @@ typedef unsigned long long __u64;
*/
#ifdef __KERNEL__
#ifdef __LP64__
#ifdef CONFIG_64BIT
#define BITS_PER_LONG 64
#define SHIFT_PER_LONG 6
#else

@@ -4,7 +4,6 @@
/*
* User space memory access functions
*/
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/cache.h>
@@ -43,16 +42,18 @@ static inline long access_ok(int type, const void __user * addr,
#define put_user __put_user
#define get_user __get_user
#if BITS_PER_LONG == 32
#if !defined(CONFIG_64BIT)
#define LDD_KERNEL(ptr) __get_kernel_bad();
#define LDD_USER(ptr) __get_user_bad();
#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr) __put_user_asm64(x,ptr)
#define ASM_WORD_INSN ".word\t"
#else
#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr) __get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr) __get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
#define ASM_WORD_INSN ".dword\t"
#endif
/*
@@ -66,6 +67,11 @@ struct exception_table_entry {
long fixup; /* fixup routine */
};
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
".section __ex_table,\"aw\"\n" \
ASM_WORD_INSN #fault_addr ", " #except_addr "\n\t" \
".previous\n"
/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
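ASM_EXCEPTIONTABLE_ENTRY() hides the 32-bit/64-bit .word versus .dword difference behind ASM_WORD_INSN, so every inline-asm accessor can register its fixup the same way. A sketch of how a hypothetical byte accessor could use it (__get_kernel_byte is invented here; the fixup routine is the same one the real accessors use):

#define __get_kernel_byte(val, ptr, err)                             \
        __asm__("\n1:\tldb\t0(%2),%0\n\t"                            \
                ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)  \
                : "=r"(val), "=r"(err)                               \
                : "r"(ptr), "1"(err)                                 \
                : "r1")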
@@ -104,43 +110,19 @@ struct exception_data {
__gu_err; \
})
#ifdef __LP64__
#define __get_kernel_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.dword\t1b,fixup_get_user_skip_1\n" \
"\t.previous" \
__asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
#define __get_user_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.dword\t1b,fixup_get_user_skip_1\n" \
"\t.previous" \
__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
#else
#define __get_kernel_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.word\t1b,fixup_get_user_skip_1\n" \
"\t.previous" \
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
#define __get_user_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.word\t1b,fixup_get_user_skip_1\n" \
"\t.previous" \
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");
#endif /* !__LP64__ */
#define __put_user(x,ptr) \
({ \
@@ -179,80 +161,54 @@ struct exception_data {
* r8/r9 are already listed as err/val.
*/
#ifdef __LP64__
#define __put_kernel_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.dword\t1b,fixup_put_user_skip_1\n" \
"\t.previous" \
"\n1:\t" stx "\t%2,0(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")
#define __put_user_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.dword\t1b,fixup_put_user_skip_1\n" \
"\t.previous" \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")
#else
#define __put_kernel_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.word\t1b,fixup_put_user_skip_1\n" \
"\t.previous" \
"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")
#define __put_user_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.word\t1b,fixup_put_user_skip_1\n" \
"\t.previous" \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")
#define __put_kernel_asm64(__val,ptr) do { \
u64 __val64 = (u64)(__val); \
u32 hi = (__val64) >> 32; \
u32 lo = (__val64) & 0xffffffff; \
#if !defined(CONFIG_64BIT)
#define __put_kernel_asm64(__val,ptr) do { \
u64 __val64 = (u64)(__val); \
u32 hi = (__val64) >> 32; \
u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%1)\n" \
"\n2:\tstw %3,4(%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.word\t1b,fixup_put_user_skip_2\n" \
"\t.word\t2b,fixup_put_user_skip_1\n" \
"\t.previous" \
"\n1:\tstw %2,0(%1)" \
"\n2:\tstw %3,4(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
: "r1"); \
} while (0)
#define __put_user_asm64(__val,ptr) do { \
u64 __val64 = (u64)__val; \
u32 hi = (__val64) >> 32; \
u32 lo = (__val64) & 0xffffffff; \
#define __put_user_asm64(__val,ptr) do { \
u64 __val64 = (u64)(__val); \
u32 hi = (__val64) >> 32; \
u32 lo = (__val64) & 0xffffffff; \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr3,%1)\n" \
"\n2:\tstw %3,4(%%sr3,%1)\n" \
"\t.section __ex_table,\"aw\"\n" \
"\t.word\t1b,fixup_get_user_skip_2\n" \
"\t.word\t2b,fixup_get_user_skip_1\n" \
"\t.previous" \
"\n1:\tstw %2,0(%%sr3,%1)" \
"\n2:\tstw %3,4(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \
: "r1"); \
} while (0)
#endif /* !__LP64__ */
#endif /* !defined(CONFIG_64BIT) */
/*

@@ -772,7 +772,7 @@
#define __NR_mknodat (__NR_Linux + 277)
#define __NR_fchownat (__NR_Linux + 278)
#define __NR_futimesat (__NR_Linux + 279)
#define __NR_newfstatat (__NR_Linux + 280)
#define __NR_fstatat64 (__NR_Linux + 280)
#define __NR_unlinkat (__NR_Linux + 281)
#define __NR_renameat (__NR_Linux + 282)
#define __NR_linkat (__NR_Linux + 283)
@@ -786,8 +786,14 @@
#define __NR_splice (__NR_Linux + 291)
#define __NR_sync_file_range (__NR_Linux + 292)
#define __NR_tee (__NR_Linux + 293)
#define __NR_vmsplice (__NR_Linux + 294)
#define __NR_move_pages (__NR_Linux + 295)
#define __NR_getcpu (__NR_Linux + 296)
#define __NR_epoll_pwait (__NR_Linux + 297)
#define __NR_statfs64 (__NR_Linux + 298)
#define __NR_fstatfs64 (__NR_Linux + 299)
#define __NR_Linux_syscalls 294
#define __NR_Linux_syscalls (__NR_fstatfs64 + 1)
#define HPUX_GATEWAY_ADDR 0xC0000004
#define LINUX_GATEWAY_ADDR 0x100
@@ -951,6 +957,8 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#endif /* __ASSEMBLY__ */
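Deriving __NR_Linux_syscalls from the last syscall number means the count can no longer drift when entries are appended. A hypothetical sketch (the syscall name is invented, not part of the patch):

#define __NR_some_future_call  (__NR_Linux + 300)           /* hypothetical */
#define __NR_Linux_syscalls    (__NR_some_future_call + 1)  /* count follows automatically */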

@@ -320,4 +320,16 @@ struct eisa_device_id {
#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s"
struct parisc_device_id {
__u8 hw_type; /* 5 bits used */
__u8 hversion_rev; /* 4 bits */
__u16 hversion; /* 12 bits */
__u32 sversion; /* 20 bits */
};
#define PA_HWTYPE_ANY_ID 0xff
#define PA_HVERSION_REV_ANY_ID 0xff
#define PA_HVERSION_ANY_ID 0xffff
#define PA_SVERSION_ANY_ID 0xffffffff
#endif /* LINUX_MOD_DEVICETABLE_H */
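A driver exposes a table of these IDs through MODULE_DEVICE_TABLE(parisc, ...), which produces the __mod_parisc_device_table symbol that the new modpost handler further down looks for. A rough sketch (the table name and the 0x0007A sversion are illustrative):

static struct parisc_device_id example_ids[] = {
        { PA_HWTYPE_ANY_ID, PA_HVERSION_REV_ANY_ID, PA_HVERSION_ANY_ID, 0x0007A },
        { 0, }
};
MODULE_DEVICE_TABLE(parisc, example_ids);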

@@ -1329,6 +1329,7 @@ extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);

@@ -1140,7 +1140,8 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
return error;
}
static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
int error;
rcu_read_lock();

@@ -452,6 +452,24 @@ static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa,
return 1;
}
/* Looks like: parisc:tNhvNrevNsvN */
static int do_parisc_entry(const char *filename, struct parisc_device_id *id,
char *alias)
{
id->hw_type = TO_NATIVE(id->hw_type);
id->hversion = TO_NATIVE(id->hversion);
id->hversion_rev = TO_NATIVE(id->hversion_rev);
id->sversion = TO_NATIVE(id->sversion);
strcpy(alias, "parisc:");
ADD(alias, "t", id->hw_type != PA_HWTYPE_ANY_ID, id->hw_type);
ADD(alias, "hv", id->hversion != PA_HVERSION_ANY_ID, id->hversion);
ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev);
ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion);
return 1;
}
/* Ignore any prefix, eg. v850 prepends _ */
static inline int sym_is(const char *symbol, const char *name)
{
@@ -559,6 +577,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
do_table(symval, sym->st_size,
sizeof(struct eisa_device_id), "eisa",
do_eisa_entry, mod);
else if (sym_is(symname, "__mod_parisc_device_table"))
do_table(symval, sym->st_size,
sizeof(struct parisc_device_id), "parisc",
do_parisc_entry, mod);
}
/* Now add out buffered information to the generated C source */
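For a table like the one sketched under mod_devicetable.h above, do_parisc_entry() would buffer a line of roughly this shape into the generated .mod.c (wildcards for the ANY_ID fields, fixed-width hex from modpost's ADD() macro; the value shown is illustrative):

MODULE_ALIAS("parisc:t*hv*rev*sv0000007A");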

@@ -961,7 +961,7 @@ static int init_section_ref_ok(const char *name)
".opd", /* see comment [OPD] at exit_section_ref_ok() */
".toc1", /* used by ppc64 */
".stab",
".rodata",
".data.rel.ro", /* used by parisc64 */
".parainstructions",
".text.lock",
"__bug_table", /* used by powerpc for BUG() */
@@ -984,6 +984,7 @@ static int init_section_ref_ok(const char *name)
".eh_frame",
".debug",
".parainstructions",
".rodata",
NULL
};
/* part of section name */

@@ -854,7 +854,7 @@ static struct snd_kcontrol_new snd_harmony_controls[] = {
HARMONY_GAIN_HE_SHIFT, 1, 0),
};
static void __init
static void __devinit
snd_harmony_mixer_reset(struct snd_harmony *h)
{
harmony_mute(h);
@@ -863,7 +863,7 @@ snd_harmony_mixer_reset(struct snd_harmony *h)
harmony_unmute(h);
}
static int __init
static int __devinit
snd_harmony_mixer_init(struct snd_harmony *h)
{
struct snd_card *card = h->card;