
Merge branch 'master' into export-slabh

Tejun Heo 2010-04-05 11:37:28 +09:00
commit 336f5899d2
183 changed files with 4625 additions and 2682 deletions

View File

@ -21,6 +21,15 @@ Required properties:
- fsl,qe-num-snums: defines how many serial numbers (SNUMs) the QE can use for the
threads.
Optional properties:
- fsl,firmware-phandle:
Usage: required only if there is no fsl,qe-firmware child node
Value type: <phandle>
Definition: Points to a firmware node (see "QE Firmware Node" below)
that contains the firmware that should be uploaded for this QE.
The compatible property for the firmware node should say,
"fsl,qe-firmware".
Recommended properties:
- brg-frequency : the internal clock source frequency for baud-rate
generators in Hz.
@ -59,3 +68,48 @@ Example:
reg = <0 c000>;
};
};
* QE Firmware Node
This node defines a firmware binary that is embedded in the device tree for
the purpose of passing the firmware from the boot loader to the kernel, or
from the hypervisor to the guest.
The firmware node itself contains the firmware binary contents, a compatible
property, and any firmware-specific properties. The node should be placed
inside a QE node that needs it. Doing so eliminates the need for a
fsl,firmware-phandle property. Other QE nodes that need the same firmware
should define an fsl,firmware-phandle property that points to the firmware node
in the first QE node.
The fsl,firmware property can be specified in the DTS (possibly using incbin)
or can be inserted by the boot loader at boot time.
Required properties:
- compatible
Usage: required
Value type: <string>
Definition: A standard property. Specify a string that indicates what
kind of firmware it is. For QE, this should be "fsl,qe-firmware".
- fsl,firmware
Usage: required
Value type: <prop-encoded-array>, encoded as an array of bytes
Definition: A standard property. This property contains the firmware
binary "blob".
Example:
qe1@e0080000 {
compatible = "fsl,qe";
qe_firmware:qe-firmware {
compatible = "fsl,qe-firmware";
fsl,firmware = [0x70 0xcd 0x00 0x00 0x01 0x46 0x45 ...];
};
...
};
qe2@e0090000 {
compatible = "fsl,qe";
fsl,firmware-phandle = <&qe_firmware>;
...
};
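
A rough sketch of how a consumer of this binding might locate the blob, either
from an embedded child node or through fsl,firmware-phandle, using the generic
of_* helpers. The function name and error handling are illustrative only, and
of_node_put() bookkeeping is omitted for brevity.

/* Illustrative only: resolve the firmware blob for a QE node. */
#include <linux/types.h>
#include <linux/of.h>

static const u8 *qe_find_firmware(struct device_node *qe, int *len)
{
        struct device_node *fw = NULL, *child;

        /* Prefer a "fsl,qe-firmware" child embedded in this QE node. */
        for_each_child_of_node(qe, child) {
                if (of_device_is_compatible(child, "fsl,qe-firmware")) {
                        fw = child;
                        break;
                }
        }

        /* Otherwise follow the fsl,firmware-phandle reference. */
        if (!fw)
                fw = of_parse_phandle(qe, "fsl,firmware-phandle", 0);
        if (!fw)
                return NULL;

        return of_get_property(fw, "fsl,firmware", len);
}
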

View File

@ -3270,6 +3270,16 @@ S: Maintained
F: include/linux/kexec.h
F: kernel/kexec.c
KEYS/KEYRINGS:
M: David Howells <dhowells@redhat.com>
L: keyrings@linux-nfs.org
S: Maintained
F: Documentation/keys.txt
F: include/linux/key.h
F: include/linux/key-type.h
F: include/keys/
F: security/keys/
KGDB
M: Jason Wessel <jason.wessel@windriver.com>
L: kgdb-bugreport@lists.sourceforge.net

View File

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 34
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*

View File

@ -15,6 +15,7 @@
#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@ -219,12 +220,6 @@ struct cpu_cache_fns {
void (*dma_flush_range)(const void *, const void *);
};
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
};
/*
* Select the calling method
*/
@ -281,37 +276,6 @@ extern void dmac_flush_range(const void *, const void *);
#endif
#ifdef CONFIG_OUTER_CACHE
extern struct outer_cache_fns outer_cache;
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
#else
static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }
#endif
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user

View File

@ -13,6 +13,7 @@
#define __ASM_CLKDEV_H
struct clk;
struct device;
struct clk_lookup {
struct list_head node;

View File

@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);

View File

@ -0,0 +1,75 @@
/*
* arch/arm/include/asm/outercache.h
*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
};
#ifdef CONFIG_OUTER_CACHE
extern struct outer_cache_fns outer_cache;
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
#else
static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }
#endif
#ifdef CONFIG_OUTER_CACHE_SYNC
static inline void outer_sync(void)
{
if (outer_cache.sync)
outer_cache.sync();
}
#else
static inline void outer_sync(void)
{ }
#endif
#endif /* __ASM_OUTERCACHE_H */
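
The wrappers above dispatch through the global outer_cache descriptor, so the
hooks cost nothing when no outer cache is registered. A hedged caller-side
sketch of the usual pattern around a buffer about to be handed to a device;
the helper name is hypothetical, while dmac_clean_range() is the existing ARM
inner-cache primitive declared in asm/cacheflush.h.

/* Sketch: write back both cache levels before an outbound DMA transfer. */
#include <asm/cacheflush.h>
#include <asm/outercache.h>

static void example_clean_for_device(const void *vaddr, unsigned long paddr,
                                     unsigned long size)
{
        /* Inner (L1) caches are maintained by virtual address... */
        dmac_clean_range(vaddr, vaddr + size);
        /* ...the outer cache (e.g. an L2x0) by physical address. */
        outer_clean_range(paddr, paddr + size);
}
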

View File

@ -60,6 +60,8 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/outercache.h>
#define __exception __attribute__((section(".exception.text")))
struct thread_info;
@ -137,10 +139,12 @@ extern unsigned int user_debug;
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
#define mb() dmb()
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dmb()
#define wmb() dmb()
#define wmb() mb()
#else
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@ -152,9 +156,9 @@ extern unsigned int user_debug;
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_mb() dmb()
#define smp_rmb() dmb()
#define smp_wmb() dmb()
#endif
#define read_barrier_depends() do { } while(0)
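
The point of the hunk above is that the mandatory barriers (mb/wmb/rmb) must
now also drain the outer cache write buffer via outer_sync() so that writes
become visible to devices, while the smp_* barriers only order CPU-to-CPU
accesses and can stay as a plain dmb(). A hedged illustration of where the
mandatory flavour is needed; the device, ring layout and register offset are
invented.

/* Illustrative fragment: wmb() before a doorbell write. */
#include <linux/types.h>
#include <linux/io.h>
#include <asm/system.h>

struct example_ring {
        u32 desc[64];
        unsigned int head;
};

static void example_submit(void __iomem *regs, struct example_ring *ring,
                           u32 desc)
{
        ring->desc[ring->head++ & 63] = desc;   /* descriptor in memory */
        wmb();                                  /* dsb() + outer_sync(): reach the device */
        writel(ring->head, regs + 0x10);        /* hypothetical doorbell register */
}
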

View File

@ -394,6 +394,14 @@ void __kprobes jprobe_return(void)
/*
* Setup an empty pt_regs. Fill SP and PC fields as
* they're needed by longjmp_break_handler.
*
* We allocate some slack between the original SP and the start of
* our fabricated regs. To be precise, we want the worst case
* covered, which is STMFD with all 16 regs, so we allocate
* 2 * sizeof(struct pt_regs).
*
* This is to prevent any simulated instruction from writing
* over the regs when they are accessing the stack.
*/
"sub sp, %0, %1 \n\t"
"ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@ -411,7 +419,7 @@ void __kprobes jprobe_return(void)
"ldmia sp, {r0 - pc} \n\t"
:
: "r" (kcb->jprobe_saved_regs.ARM_sp),
"I" (sizeof(struct pt_regs)),
"I" (sizeof(struct pt_regs) * 2),
"J" (offsetof(struct pt_regs, ARM_sp)),
"J" (offsetof(struct pt_regs, ARM_pc)),
"J" (offsetof(struct pt_regs, ARM_cpsr))

View File

@ -74,7 +74,7 @@ ENTRY(memmove)
rsb ip, ip, #32
addne pc, pc, ip @ C is always clear here
b 7f
6: nop
6: W(nop)
W(ldr) r3, [r1, #-4]!
W(ldr) r4, [r1, #-4]!
W(ldr) r5, [r1, #-4]!
@ -85,7 +85,7 @@ ENTRY(memmove)
add pc, pc, ip
nop
nop
W(nop)
W(str) r3, [r0, #-4]!
W(str) r4, [r0, #-4]!
W(str) r5, [r0, #-4]!

View File

@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
config OUTER_CACHE
bool
config OUTER_CACHE_SYNC
bool
help
The outer cache has an outer_cache_fns.sync function pointer
that can be used to drain the write buffer of the outer cache.
config CACHE_FEROCEON_L2
bool "Enable the Feroceon L2 cache controller"
depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@ -757,6 +763,7 @@ config CACHE_L2X0
REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
help
This option enables the L2x0 PrimeCell.
@ -781,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
int
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
config ARCH_HAS_BARRIERS
bool
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
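
When ARCH_HAS_BARRIERS is selected, the platform supplies the mandatory
barriers itself through mach/barriers.h (see the new CONFIG_ARCH_HAS_BARRIERS
branch in asm/system.h earlier in this commit). Purely as an illustration of
the shape such a header takes, with an invented platform hook:

/* Hypothetical arch/arm/mach-example/include/mach/barriers.h */
#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H

/* Invented platform-specific drain of an external write buffer. */
extern void example_drain_write_buffer(void);

#define mb()    do { dsb(); example_drain_write_buffer(); } while (0)
#define rmb()   dsb()
#define wmb()   mb()

#endif /* __MACH_BARRIERS_H */
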

View File

@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
}
#endif
static void l2x0_cache_sync(void)
{
unsigned long flags;
spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static inline void l2x0_inv_all(void)
{
unsigned long flags;
@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
printk(KERN_INFO "L2X0 cache controller enabled\n");
}

View File

@ -545,7 +545,7 @@ static int __init vfp_init(void)
*/
elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
if (VFP_arch >= 3) {
if (VFP_arch >= 2) {
elf_hwcap |= HWCAP_VFPv3;
/*

View File

@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
config HAVE_LATENCYTOP_SUPPORT
def_bool y
config PCI
def_bool n
config DTC
def_bool y

View File

@ -84,7 +84,7 @@ define archhelp
echo '* linux.bin - Create raw binary'
echo ' linux.bin.gz - Create compressed raw binary'
echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
echo ' - stripped elf with fdt blob
echo ' - stripped elf with fdt blob'
echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
echo ' *_defconfig - Select default config from arch/microblaze/configs'
echo ''
@ -94,3 +94,5 @@ define archhelp
echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
echo ' (minus the .dts extension).'
endef
MRPROPER_FILES += $(boot)/simpleImage.*

View File

@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
endif
$(obj)/linux.bin: vmlinux FORCE
[ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
$(call if_changed,objcopy)
$(call if_changed,uimage)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
$(obj)/%.dtb: $(dtstree)/%.dts FORCE
$(call if_changed,dtc)
clean-kernel += linux.bin linux.bin.gz simpleImage.*
clean-files += *.dtb simpleImage.*.unstrip
clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub

View File

@ -14,7 +14,6 @@
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/registers.h>
#include <asm/segment.h>
#include <asm/entry.h>
#include <asm/current.h>

View File

@ -1,49 +0,0 @@
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_MICROBLAZE_SEGMENT_H
#define _ASM_MICROBLAZE_SEGMENT_H
# ifndef __ASSEMBLY__
typedef struct {
unsigned long seg;
} mm_segment_t;
/*
* On Microblaze the fs value is actually the top of the corresponding
* address space.
*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*
* For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
*/
# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
# ifndef CONFIG_MMU
# define KERNEL_DS MAKE_MM_SEG(0)
# define USER_DS KERNEL_DS
# else
# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
# endif
# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(val) (current_thread_info()->addr_limit = (val))
# define segment_eq(a, b) ((a).seg == (b).seg)
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_SEGMENT_H */

View File

@ -19,7 +19,6 @@
#ifndef __ASSEMBLY__
# include <linux/types.h>
# include <asm/processor.h>
# include <asm/segment.h>
/*
* low level task data that entry.S needs immediate access to
@ -60,6 +59,10 @@ struct cpu_context {
__u32 fsr;
};
typedef struct {
unsigned long seg;
} mm_segment_t;
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */

View File

@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
extern void _tlbia(void);
#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
#define __tlbie(x) { _tlbie(x); }
static inline void local_flush_tlb_all(void)
{ __tlbia(); }
@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{ _tlbie(vmaddr); }
{ __tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }

View File

@ -22,288 +22,38 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <linux/string.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
#ifndef CONFIG_MMU
extern int ___range_ok(unsigned long addr, unsigned long size);
#define __range_ok(addr, size) \
___range_ok((unsigned long)(addr), (unsigned long)(size))
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
/* Undefined function to trigger linker error */
extern int bad_user_access_length(void);
/* FIXME this is function for optimalization -> memcpy */
#define __get_user(var, ptr) \
({ \
int __gu_err = 0; \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
case 4: \
(var) = *(ptr); \
break; \
case 8: \
memcpy((void *) &(var), (ptr), 8); \
break; \
default: \
(var) = 0; \
__gu_err = __get_user_bad(); \
break; \
} \
__gu_err; \
})
#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
/* FIXME is not there defined __pu_val */
#define __put_user(var, ptr) \
({ \
int __pu_err = 0; \
switch (sizeof(*(ptr))) { \
case 1: \
case 2: \
case 4: \
*(ptr) = (var); \
break; \
case 8: { \
typeof(*(ptr)) __pu_val = (var); \
memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
} \
break; \
default: \
__pu_err = __put_user_bad(); \
break; \
} \
__pu_err; \
})
#define __put_user_bad() (bad_user_access_length(), (-EFAULT))
#define put_user(x, ptr) __put_user((x), (ptr))
#define get_user(x, ptr) __get_user((x), (ptr))
#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0)
#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
(__copy_from_user((to), (from), (n)))
static inline unsigned long clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
size = __clear_user(addr, size);
return size;
}
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);
#else /* CONFIG_MMU */
/*
* Address is valid if:
* - "addr", "addr + size" and "size" are all below the limit
* On Microblaze the fs value is actually the top of the corresponding
* address space.
*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*
* For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
*/
#define access_ok(type, addr, size) \
(get_fs().seg > (((unsigned long)(addr)) | \
(size) | ((unsigned long)(addr) + (size))))
# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
type?"WRITE":"READ",addr,size,get_fs().seg)) */
# ifndef CONFIG_MMU
# define KERNEL_DS MAKE_MM_SEG(0)
# define USER_DS KERNEL_DS
# else
# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
# endif
/*
* All the __XXX versions macros/functions below do not perform
* access checking. It is assumed that the necessary checks have been
* already performed before the finction (macro) is called.
*/
# define get_ds() (KERNEL_DS)
# define get_fs() (current_thread_info()->addr_limit)
# define set_fs(val) (current_thread_info()->addr_limit = (val))
#define get_user(x, ptr) \
({ \
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
? __get_user((x), (ptr)) : -EFAULT; \
})
#define put_user(x, ptr) \
({ \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
? __put_user((x), (ptr)) : -EFAULT; \
})
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \
default: \
__gu_val = 0; __gu_err = -EINVAL; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err), "=r"(__gu_val) \
: "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) volatile __gu_val = (x); \
long __gu_err = 0; \
switch (sizeof(__gu_val)) { \
case 1: \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
break; \
case 8: \
__put_user_asm_8((ptr), __gu_val, __gu_err); \
break; \
default: \
__gu_err = -EINVAL; \
} \
__gu_err; \
})
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ (" lwi %0, %1, 0; \
1: swi %0, %2, 0; \
lwi %0, %1, 4; \
2: swi %0, %2, 4; \
addk %0,r0,r0; \
3: \
.section .fixup,\"ax\"; \
4: brid 3b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,4b,2b,4b; \
.previous;" \
: "=&r"(__gu_err) \
: "r"(&__gu_val), \
"r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0; \
addk %0, r0, r0; \
2: \
.section .fixup,\"ax\"; \
3: brid 2b; \
addik %0, r0, %3; \
.previous; \
.section __ex_table,\"a\"; \
.word 1b,3b; \
.previous;" \
: "=r"(__gu_err) \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
/*
* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
*/
static inline int clear_user(char *to, int size)
{
if (size && access_ok(VERIFY_WRITE, to, size)) {
__asm__ __volatile__ (" \
1: \
sb r0, %2, r0; \
addik %0, %0, -1; \
bneid %0, 1b; \
addik %2, %2, 1; \
2: \
.section __ex_table,\"a\"; \
.word 1b,2b; \
.section .text;" \
: "=r"(size) \
: "0"(size), "r"(to)
);
}
return size;
}
#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n))
#define copy_to_user(to, from, n) \
(access_ok(VERIFY_WRITE, (to), (n)) ? \
__copy_tofrom_user((void __user *)(to), \
(__force const void __user *)(from), (n)) \
: -EFAULT)
#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) \
(access_ok(VERIFY_READ, (from), (n)) ? \
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n)) \
: -EFAULT)
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);
#define strncpy_from_user(to, from, len) \
(access_ok(VERIFY_READ, from, 1) ? \
__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len) \
(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
#endif /* CONFIG_MMU */
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
# define segment_eq(a, b) ((a).seg == (b).seg)
/*
* The exception table consists of pairs of addresses: the first is the
@ -321,6 +71,297 @@ struct exception_table_entry {
unsigned long insn, fixup;
};
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
#ifndef CONFIG_MMU
/* Check against bounds of physical memory */
static inline int ___range_ok(unsigned long addr, unsigned long size)
{
return ((addr < memory_start) ||
((addr + size) > memory_end));
}
#define __range_ok(addr, size) \
___range_ok((unsigned long)(addr), (unsigned long)(size))
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#else
/*
* Address is valid if:
* - "addr", "addr + size" and "size" are all below the limit
*/
#define access_ok(type, addr, size) \
(get_fs().seg > (((unsigned long)(addr)) | \
(size) | ((unsigned long)(addr) + (size))))
/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
type?"WRITE":"READ",addr,size,get_fs().seg)) */
#endif
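
The OR in the MMU variant works because an access is only rejected if addr,
size or addr + size has bits at or above the segment limit; in particular a
sum that would wrap implies addr itself is already over the limit. A
standalone check of the same expression with made-up values (plain C, not
kernel code):

/* Standalone illustration of the access_ok() expression above. */
#include <stdio.h>

#define EXAMPLE_TASK_SIZE 0x80000000UL          /* arbitrary example value */

static int example_access_ok(unsigned long addr, unsigned long size)
{
        unsigned long seg = EXAMPLE_TASK_SIZE - 1;      /* like USER_DS */

        return seg > (addr | size | (addr + size));
}

int main(void)
{
        printf("%d\n", example_access_ok(0x10000000UL, 0x1000UL)); /* 1: in range */
        printf("%d\n", example_access_ok(0x7fffff00UL, 0x1000UL)); /* 0: crosses the limit */
        printf("%d\n", example_access_ok(0xfffff000UL, 0x2000UL)); /* 0: above the limit, would wrap on 32-bit */
        return 0;
}
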
#ifdef CONFIG_MMU
# define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
#else
# define __FIXUP_SECTION ".section .discard,\"ax\"\n"
# define __EX_TABLE_SECTION ".section .discard,\"a\"\n"
#endif
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
static inline unsigned long __must_check __clear_user(void __user *to,
unsigned long n)
{
/* normal memset with two words to __ex_table */
__asm__ __volatile__ ( \
"1: sb r0, %2, r0;" \
" addik %0, %0, -1;" \
" bneid %0, 1b;" \
" addik %2, %2, 1;" \
"2: " \
__EX_TABLE_SECTION \
".word 1b,2b;" \
".previous;" \
: "=r"(n) \
: "0"(n), "r"(to)
);
return n;
}
static inline unsigned long __must_check clear_user(void __user *to,
unsigned long n)
{
might_sleep();
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
return __clear_user(to, n);
}
/* put_user and get_user macros */
extern long __user_bad(void);
#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0;" \
" addk %0, r0, r0;" \
"2: " \
__FIXUP_SECTION \
"3: brid 2b;" \
" addik %0, r0, %3;" \
".previous;" \
__EX_TABLE_SECTION \
".word 1b,3b;" \
".previous;" \
: "=&r"(__gu_err), "=r"(__gu_val) \
: "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
/**
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("lbu", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__get_user_asm("lhu", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \
default: \
/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
#define get_user(x, ptr) \
({ \
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
? __get_user((x), (ptr)) : -EFAULT; \
})
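
As the kernel-doc above notes, get_user() only handles simple scalar types;
the usual calling pattern looks roughly like the sketch below. The handler
and its argument are invented for illustration.

/* Illustrative caller: read one int from a user pointer. */
#include <linux/errno.h>
#include <asm/uaccess.h>

static int example_set_timeout(int __user *argp)
{
        int timeout;

        if (get_user(timeout, argp))
                return -EFAULT;         /* bad user pointer */
        if (timeout < 0)
                return -EINVAL;

        /* ... store the validated value somewhere ... */
        return 0;
}
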
#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ ( \
"1:" insn " %1, %2, r0;" \
" addk %0, r0, r0;" \
"2: " \
__FIXUP_SECTION \
"3: brid 2b;" \
" addik %0, r0, %3;" \
".previous;" \
__EX_TABLE_SECTION \
".word 1b,3b;" \
".previous;" \
: "=&r"(__gu_err) \
: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
({ \
__asm__ __volatile__ (" lwi %0, %1, 0;" \
"1: swi %0, %2, 0;" \
" lwi %0, %1, 4;" \
"2: swi %0, %2, 4;" \
" addk %0, r0, r0;" \
"3: " \
__FIXUP_SECTION \
"4: brid 3b;" \
" addik %0, r0, %3;" \
".previous;" \
__EX_TABLE_SECTION \
".word 1b,4b,2b,4b;" \
".previous;" \
: "=&r"(__gu_err) \
: "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
); \
})
/**
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Returns zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) volatile __gu_val = (x); \
long __gu_err = 0; \
switch (sizeof(__gu_val)) { \
case 1: \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
break; \
case 2: \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
__put_user_asm("sw", (ptr), __gu_val, __gu_err); \
break; \
case 8: \
__put_user_asm_8((ptr), __gu_val, __gu_err); \
break; \
default: \
/*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
} \
__gu_err; \
})
#ifndef CONFIG_MMU
#define put_user(x, ptr) __put_user((x), (ptr))
#else /* CONFIG_MMU */
#define put_user(x, ptr) \
({ \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
? __put_user((x), (ptr)) : -EFAULT; \
})
#endif /* CONFIG_MMU */
/* copy_to_from_user */
#define __copy_from_user(to, from, n) \
__copy_tofrom_user((__force void __user *)(to), \
(void __user *)(from), (n))
#define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n))
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
might_sleep();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
return n;
}
#define __copy_to_user(to, from, n) \
__copy_tofrom_user((void __user *)(to), \
(__force const void __user *)(from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
might_sleep();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
return n;
}
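
For anything bigger than a scalar, the copy helpers above are used instead;
they return the number of bytes left uncopied, which callers normally turn
into -EFAULT. A sketch with an invented structure and handler:

/* Illustrative round trip through copy_from_user()/copy_to_user(). */
#include <linux/errno.h>
#include <asm/uaccess.h>

struct example_req {
        unsigned int cmd;
        unsigned int arg;
};

static long example_handle(struct example_req __user *uptr)
{
        struct example_req req;

        if (copy_from_user(&req, uptr, sizeof(req)))
                return -EFAULT;

        req.arg += 1;                   /* work on the kernel-space copy */

        if (copy_to_user(uptr, &req, sizeof(req)))
                return -EFAULT;
        return 0;
}
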
/*
* Copy a null terminated string from userspace.
*/
extern int __strncpy_user(char *to, const char __user *from, int len);
#define __strncpy_from_user __strncpy_user
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
return __strncpy_from_user(dst, src, count);
}
/*
* Return the size of a string (including the ending 0)
*
* Return 0 on exception, a value greater than N if too long
*/
extern int __strnlen_user(const char __user *sstr, int len);
static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
return __strnlen_user(src, n);
}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

View File

@ -38,7 +38,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
static unsigned long get_dma_direct_offset(struct device *dev)
{
if (dev)
if (likely(dev))
return (unsigned long)dev->archdata.dma_data;
return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */

View File

@ -51,6 +51,12 @@ swapper_pg_dir:
.text
ENTRY(_start)
#if CONFIG_KERNEL_BASE_ADDR == 0
brai TOPHYS(real_start)
.org 0x100
real_start:
#endif
mfs r1, rmsr
andi r1, r1, ~2
mts rmsr, r1
@ -99,8 +105,8 @@ no_fdt_arg:
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
sb r2, r4, r6 /* addr[r4+r6]= r7*/
lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
sb r2, r4, r6 /* addr[r4+r6]= r2*/
addik r6, r6, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* decrement loop */
@ -128,7 +134,7 @@ _copy_bram:
* virtual to physical.
*/
nop
addik r3, r0, 63 /* Invalidate all TLB entries */
addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */

View File

@ -313,13 +313,13 @@ _hw_exception_handler:
mfs r5, rmsr;
nop
swi r5, r1, 0;
mfs r3, resr
mfs r4, resr
nop
mfs r4, rear;
mfs r3, rear;
nop
#ifndef CONFIG_MMU
andi r5, r3, 0x1000; /* Check ESR[DS] */
andi r5, r4, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@ -327,13 +327,14 @@ not_in_delay_slot:
swi r17, r1, PT_R17
#endif
andi r5, r3, 0x1F; /* Extract ESR[EXC] */
andi r5, r4, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
#ifdef DEBUG
/* counting which exception happen */
lwi r5, r0, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
@ -341,6 +342,7 @@ not_in_delay_slot:
lwi r5, r6, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
swi r5, r6, 0x200 + TOPHYS(r0_ram)
#endif
/* end */
/* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
swi r18, r1, PT_R18
or r5, r1, r0
andi r6, r3, 0x1F; /* Load ESR[EC] */
andi r6, r4, 0x1F; /* Load ESR[EC] */
lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
swi r7, r1, PT_MODE
mfs r7, rfsr
@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
*/
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
* R4 = EAR
* R4 = ESR
* R3 = EAR
*/
#ifdef CONFIG_MMU
andi r6, r3, 0x1000 /* Check ESR[DS] */
andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@ -439,7 +441,7 @@ _no_delayslot:
RESTORE_STATE;
bri unaligned_data_trap
#endif
andi r6, r3, 0x3E0; /* Mask and extract the register operand */
andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
@ -448,33 +450,33 @@ _no_delayslot:
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
andi r6, r3, 0x400; /* Extract ESR[S] */
andi r6, r4, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
ex_lw:
andi r6, r3, 0x800; /* Extract ESR[W] */
andi r6, r4, 0x800; /* Extract ESR[W] */
beqi r6, ex_lhw;
lbui r5, r4, 0; /* Exception address in r4 */
lbui r5, r3, 0; /* Exception address in r3 */
/* Load a word, byte-by-byte from destination address
and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r5, r4, 1;
lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
lbui r5, r4, 2;
lbui r5, r3, 2;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
lbui r5, r4, 3;
lbui r5, r3, 3;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
/* Get the destination register value into r3 */
lwi r3, r0, TOPHYS(ex_tmp_data_loc_0);
/* Get the destination register value into r4 */
lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
bri ex_lw_tail;
ex_lhw:
lbui r5, r4, 0; /* Exception address in r4 */
lbui r5, r3, 0; /* Exception address in r3 */
/* Load a half-word, byte-by-byte from destination
address and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r5, r4, 1;
lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
/* Get the destination register value into r3 */
lhui r3, r0, TOPHYS(ex_tmp_data_loc_0);
/* Get the destination register value into r4 */
lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
@ -502,25 +504,25 @@ ex_sw_tail:
andi r6, r6, 0x800; /* Extract ESR[W] */
beqi r6, ex_shw;
/* Get the word - delay slot */
swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
/* Store the word, byte-by-byte into destination address */
lbui r3, r0, TOPHYS(ex_tmp_data_loc_0);
sbi r3, r4, 0;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_1);
sbi r3, r4, 1;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r3, r4, 2;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r3, r4, 3;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
sbi r4, r3, 0;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
sbi r4, r3, 1;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r4, r3, 2;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r4, r3, 3;
bri ex_handler_done;
ex_shw:
/* Store the lower half-word, byte-by-byte into destination address */
swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r3, r4, 0;
lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r3, r4, 1;
swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r4, r3, 0;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
@ -560,21 +562,16 @@ ex_handler_done:
*/
mfs r11, rpid
nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r4, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4
bgti r4, ex3
ori r5, r0, CONFIG_KERNEL_START
cmpu r5, r3, r5
bgti r5, ex3
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
mfs r4, resr
nop
andi r4, r4, 0x800 /* ESR_Z - zone protection */
bnei r4, ex2
@ -589,8 +586,6 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
mfs r4, resr
nop
andi r4, r4, 0x800 /* ESR_Z */
bnei r4, ex2
/* get current task address */
@ -665,8 +660,6 @@ ex_handler_done:
* R3 = ESR
*/
mfs r3, rear /* Get faulting address */
nop
RESTORE_STATE;
bri page_fault_instr_trap
@ -677,18 +670,15 @@ ex_handler_done:
*/
handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
* R3 = EAR, R4 = ESR
*/
mfs r11, rpid
nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables. */
ori r4, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4
ori r6, r0, CONFIG_KERNEL_START
cmpu r4, r3, r6
bgti r4, ex5
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
@ -731,9 +721,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
bri finish_tlb_load
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@ -754,9 +743,6 @@ ex_handler_done:
*/
mfs r11, rpid
nop
bri 4
mfs r3, rear /* Get faulting address */
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@ -792,7 +778,7 @@ ex_handler_done:
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
beqi r6, ex7
beqi r6, ex10
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
@ -805,9 +791,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
bri finish_tlb_load
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@ -837,9 +822,9 @@ ex_handler_done:
andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
ori r6, r0, 1
cmp r31, r5, r6
blti r31, sem
blti r31, ex12
addik r5, r6, 1
sem:
ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
@ -859,7 +844,6 @@ ex_handler_done:
nop
/* Done...restore registers and get out of here. */
ex12:
mts rpid, r11
nop
bri 4

View File

@ -26,9 +26,10 @@
* We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/
.globl _tlbia;
.type _tlbia, @function
.align 4;
_tlbia:
addik r12, r0, 63 /* flush all entries (63 - 3) */
addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
/* isync */
_tlbia_1:
mts rtlbx, r12
@ -41,11 +42,13 @@ _tlbia_1:
/* sync */
rtsd r15, 8
nop
.size _tlbia, . - _tlbia
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
.type _tlbie, @function
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
@ -59,17 +62,20 @@ _tlbie_1:
rtsd r15, 8
nop
.size _tlbie, . - _tlbie
/*
* Allocate TLB entry for early console
*/
.globl early_console_reg_tlb_alloc;
.type early_console_reg_tlb_alloc, @function
.align 4;
early_console_reg_tlb_alloc:
/*
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
ori r4, r0, 63
ori r4, r0, MICROBLAZE_TLB_SIZE - 1
mts rtlbx, r4 /* TLB slot 2 */
or r4,r5,r0
@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
rtsd r15, 8
nop
.size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
/*
* Copy a whole page (4096 bytes).
*/
@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
#define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page;
.type copy_page, @function
.align 4;
copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@ -118,3 +127,5 @@ _copy_page_loop:
addik r11, r11, -1
rtsd r15, 8
nop
.size copy_page, . - copy_page

View File

@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
void default_idle(void)
{
if (!hlt_counter) {
if (likely(hlt_counter)) {
while (!need_resched())
cpu_relax();
} else {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
local_irq_disable();
@ -82,9 +86,7 @@ void default_idle(void)
cpu_sleep();
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
} else
while (!need_resched())
cpu_relax();
}
}
void cpu_idle(void)

View File

@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
}
#endif /* CONFIG_MTD_UCLINUX_EBSS */
#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
#define eprintk early_printk
#else
#define eprintk printk
#endif
void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr)
{
@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
early_printk("Ramdisk addr 0x%08x, ", ram);
eprintk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
early_printk("FDT at 0x%08x\n", fdt);
eprintk("FDT at 0x%08x\n", fdt);
else
early_printk("Compiled-in FDT at 0x%08x\n",
eprintk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
early_printk("Found romfs @ 0x%08x (0x%08x)\n",
eprintk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
early_printk("#### klimit %p ####\n", old_klimit);
eprintk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss);
early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr)
early_printk("!!!Your kernel has setup MSR instruction but "
eprintk("!!!Your kernel has setup MSR instruction but "
"CPU don't have it %d\n", msr);
#else
if (!msr)
early_printk("!!!Your kernel not setup MSR instruction but "
eprintk("!!!Your kernel not setup MSR instruction but "
"CPU have it %d\n", msr);
#endif

View File

@ -22,13 +22,11 @@ void trap_init(void)
__enable_hw_exceptions();
}
static int kstack_depth_to_print = 24;
static unsigned long kstack_depth_to_print = 24;
static int __init kstack_setup(char *s)
{
kstack_depth_to_print = strict_strtoul(s, 0, NULL);
return 1;
return !strict_strtoul(s, 0, &kstack_depth_to_print);
}
__setup("kstack=", kstack_setup);

View File

@ -10,5 +10,4 @@ else
lib-y += memcpy.o memmove.o
endif
lib-$(CONFIG_NO_MMU) += uaccess.o
lib-$(CONFIG_MMU) += uaccess_old.o
lib-y += uaccess_old.o

View File

@ -30,8 +30,9 @@
*/
#include <linux/linkage.h>
.text
.globl memcpy
.type memcpy, @function
.ent memcpy
memcpy:
@ -345,9 +346,11 @@ a_done:
rtsd r15, 8
nop
.size memcpy, . - memcpy
.end memcpy
/*----------------------------------------------------------------------------*/
.globl memmove
.type memmove, @function
.ent memmove
memmove:
@ -659,4 +662,5 @@ d_done:
rtsd r15, 8
nop
.size memmove, . - memmove
.end memmove

View File

@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
const uint32_t *i_src;
uint32_t *i_dst;
if (c >= 4) {
if (likely(c >= 4)) {
unsigned value, buf_hold;
/* Align the dstination to a word boundry. */

View File

@ -33,22 +33,23 @@
#ifdef __HAVE_ARCH_MEMSET
void *memset(void *v_src, int c, __kernel_size_t n)
{
char *src = v_src;
#ifdef CONFIG_OPT_LIB_FUNCTION
uint32_t *i_src;
uint32_t w32;
uint32_t w32 = 0;
#endif
/* Truncate c to 8 bits */
c = (c & 0xFF);
#ifdef CONFIG_OPT_LIB_FUNCTION
/* Make a repeating word out of it */
w32 = c;
w32 |= w32 << 8;
w32 |= w32 << 16;
if (unlikely(c)) {
/* Make a repeating word out of it */
w32 = c;
w32 |= w32 << 8;
w32 |= w32 << 16;
}
if (n >= 4) {
if (likely(n >= 4)) {
/* Align the destination to a word boundary */
/* This is done in an endian independent manner */
switch ((unsigned) src & 3) {
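
The replication above simply spreads the fill byte into all four byte lanes of
a word, and the unlikely(c) test skips that work for the common
memset-to-zero case. A standalone check of the same two shifts (ordinary C,
outside the kernel):

/* Standalone check of the byte-replication step used above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int c = 0x5a;
        uint32_t w32 = c & 0xFF;

        w32 |= w32 << 8;                /* 0x00005a5a */
        w32 |= w32 << 16;               /* 0x5a5a5a5a */
        printf("0x%08x\n", (unsigned)w32);      /* prints 0x5a5a5a5a */
        return 0;
}
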

View File

@ -1,48 +0,0 @@
/*
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/string.h>
#include <asm/uaccess.h>
#include <asm/bug.h>
long strnlen_user(const char __user *src, long count)
{
return strlen(src) + 1;
}
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
char *tmp; \
strncpy(dst, src, count); \
for (tmp = dst; *tmp && count > 0; tmp++, count--) \
; \
res = (tmp - dst); \
} while (0)
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
}
long strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
__do_strncpy_from_user(dst, src, count, res);
return res;
}
unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size)
{
memcpy(to, from, size);
return 0;
}

View File

@ -22,6 +22,7 @@
.text
.globl __strncpy_user;
.type __strncpy_user, @function
.align 4;
__strncpy_user:
@ -50,7 +51,7 @@ __strncpy_user:
3:
rtsd r15,8
nop
.size __strncpy_user, . - __strncpy_user
.section .fixup, "ax"
.align 2
@ -72,6 +73,7 @@ __strncpy_user:
.text
.globl __strnlen_user;
.type __strnlen_user, @function
.align 4;
__strnlen_user:
addik r3,r6,0
@ -90,7 +92,7 @@ __strnlen_user:
3:
rtsd r15,8
nop
.size __strnlen_user, . - __strnlen_user
.section .fixup,"ax"
4:
@ -108,6 +110,7 @@ __strnlen_user:
*/
.text
.globl __copy_tofrom_user;
.type __copy_tofrom_user, @function
.align 4;
__copy_tofrom_user:
/*
@ -116,20 +119,34 @@ __copy_tofrom_user:
* r7, r3 - count
* r4 - tempval
*/
addik r3,r7,0
beqi r3,3f
1:
lbu r4,r6,r0
addik r6,r6,1
2:
sb r4,r5,r0
addik r3,r3,-1
bneid r3,1b
addik r5,r5,1 /* delay slot */
beqid r7, 3f /* zero size is not likely */
andi r3, r7, 0x3 /* filter add count */
bneid r3, 4f /* if is odd value then byte copying */
or r3, r5, r6 /* find if is any to/from unaligned */
andi r3, r3, 0x3 /* mask unaligned */
bneid r3, 1f /* it is unaligned -> then jump */
or r3, r0, r0
/* at least one 4 byte copy */
5: lw r4, r6, r3
6: sw r4, r5, r3
addik r7, r7, -4
bneid r7, 5b
addik r3, r3, 4
addik r3, r7, 0
rtsd r15, 8
nop
4: or r3, r0, r0
1: lbu r4,r6,r3
2: sb r4,r5,r3
addik r7,r7,-1
bneid r7,1b
addik r3,r3,1 /* delay slot */
3:
addik r3,r7,0
rtsd r15,8
nop
.size __copy_tofrom_user, . - __copy_tofrom_user
.section __ex_table,"a"
.word 1b,3b,2b,3b
.word 1b,3b,2b,3b,5b,3b,6b,3b
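
In C terms the reworked routine now roughly does the following: take the
word-at-a-time fast path when the length is a multiple of four and both
pointers are word aligned, otherwise fall back to the old byte loop, and
return the number of bytes not copied. This is only a sketch of that dispatch
logic; the __ex_table fixups that turn a fault into a partial-copy return
value are omitted.

/* C-level sketch of the fast path added above; fault handling omitted. */
#include <stdint.h>

static unsigned long sketch_copy_tofrom_user(void *to, const void *from,
                                             unsigned long n)
{
        unsigned char *d = to;
        const unsigned char *s = from;

        if (n == 0)
                return 0;

        /* Fast path: length a multiple of 4, both pointers word aligned. */
        if (!(n & 3) && !(((uintptr_t)d | (uintptr_t)s) & 3)) {
                for (; n; n -= 4, d += 4, s += 4)
                        *(uint32_t *)d = *(const uint32_t *)s;
                return n;               /* 0: everything copied */
        }

        /* Slow path: byte-by-byte copy, as the old routine always did. */
        for (; n; n--)
                *d++ = *s++;
        return n;                       /* 0 unless a fault cut the loop short */
}
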

View File

@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
regs->esr = error_code;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (kernel_mode(regs) && (address >= TASK_SIZE)) {
if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
printk(KERN_WARNING "kernel task_size exceed");
_exception(SIGSEGV, regs, code, address);
}
@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
#endif /* CONFIG_KGDB */
if (in_atomic() || !mm) {
if (unlikely(in_atomic() || !mm)) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
vma = find_vma(mm, address);
if (!vma)
if (unlikely(!vma))
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
goto bad_area;
if (!is_write)
if (unlikely(!is_write))
goto bad_area;
/*
@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
if (unlikely(address + 0x100000 < vma->vm_end)) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
@ -209,15 +209,15 @@ good_area:
code = SEGV_ACCERR;
/* a write */
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
if (unlikely(is_write)) {
if (unlikely(!(vma->vm_flags & VM_WRITE)))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
if (unlikely(error_code & 0x08000000))
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
goto bad_area;
}
@ -235,7 +235,7 @@ survive:
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR)
if (unlikely(fault & VM_FAULT_MAJOR))
current->maj_flt++;
else
current->min_flt++;

View File

@ -166,7 +166,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *)addr, 0xcc, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
@ -209,14 +208,6 @@ void __init mem_init(void)
}
#ifndef CONFIG_MMU
/* Check against bounds of physical memory */
int ___range_ok(unsigned long addr, unsigned long size)
{
return ((addr < memory_start) ||
((addr + size) > memory_end));
}
EXPORT_SYMBOL(___range_ok);
int page_is_ram(unsigned long pfn)
{
return __range_ok(pfn, 0);

View File

@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
err = 0;
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
if (mem_init_done)
if (unlikely(mem_init_done))
flush_HPTE(0, va, pmd_val(*pd));
/* flush_HPTE(0, va, pg); */
}

View File

@ -128,7 +128,6 @@ _GLOBAL(__restore_cpu_power7)
/* place holder */
blr
#ifdef CONFIG_EVENT_TRACING
/*
* Get a minimal set of registers for our caller's nth caller.
* r3 = regs pointer, r5 = n.
@ -154,4 +153,3 @@ _GLOBAL(perf_arch_fetch_caller_regs)
PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3)
PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3)
blr
#endif /* CONFIG_EVENT_TRACING */

View File

@ -481,6 +481,8 @@ mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
if (rc)
goto err_bcom_rx_irq;
lpbfifo.dma_irqs_enabled = 1;
/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
lpbfifo.bcom_tx_task =
bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.33-rc2
# Mon Jan 4 11:20:36 2010
# Linux kernel version: 2.6.34-rc2
# Mon Mar 29 02:21:58 2010
#
CONFIG_SUPERH=y
CONFIG_SUPERH32=y
@ -13,8 +13,8 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
CONFIG_SPARSE_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_TIME=y
CONFIG_GENERIC_CLOCKEVENTS=y
@ -32,6 +32,7 @@ CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_ARCH_HAS_DEFAULT_IDLE=y
CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
CONFIG_DMA_NONCOHERENT=y
CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@ -47,9 +48,11 @@ CONFIG_LOCALVERSION=""
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
CONFIG_HAVE_KERNEL_LZMA=y
CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_BZIP2 is not set
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_LZO is not set
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
@ -71,14 +74,8 @@ CONFIG_RCU_FANOUT=32
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
CONFIG_GROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
# CONFIG_RT_GROUP_SCHED is not set
CONFIG_USER_SCHED=y
# CONFIG_CGROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
@ -107,7 +104,7 @@ CONFIG_PERF_USE_VMALLOC=y
#
# Kernel Performance Events And Counters
#
# CONFIG_PERF_EVENTS is not set
CONFIG_PERF_EVENTS=y
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_COMPAT_BRK=y
@ -116,13 +113,13 @@ CONFIG_SLAB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_CLK=y
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_HAVE_HW_BREAKPOINT=y
#
# GCOV-based kernel profiling
@ -234,12 +231,12 @@ CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_QUICKLIST=y
CONFIG_MMU=y
CONFIG_PAGE_OFFSET=0x80000000
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_FORCE_MAX_ZONEORDER=12
CONFIG_MEMORY_START=0x08000000
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_29BIT=y
# CONFIG_PMB_ENABLE is not set
# CONFIG_X2TLB is not set
# CONFIG_PMB is not set
CONFIG_X2TLB=y
CONFIG_VSYSCALL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@ -247,6 +244,8 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_MAX_ACTIVE_REGIONS=1
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_IOREMAP_FIXED=y
CONFIG_UNCACHED_MAPPING=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
@ -262,7 +261,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=2
CONFIG_NR_QUICK=1
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
@ -337,7 +336,6 @@ CONFIG_SECCOMP=y
# CONFIG_PREEMPT_VOLUNTARY is not set
CONFIG_PREEMPT=y
CONFIG_GUSA=y
# CONFIG_SPARSE_IRQ is not set
#
# Boot options
@ -347,7 +345,7 @@ CONFIG_BOOT_LINK_OFFSET=0x00800000
CONFIG_ENTRY_OFFSET=0x00001000
CONFIG_CMDLINE_OVERWRITE=y
# CONFIG_CMDLINE_EXTEND is not set
CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=120M memchunk.vpu=4m"
CONFIG_CMDLINE="console=tty0, console=ttySC0,115200 root=/dev/nfs ip=dhcp mem=248M memchunk.vpu=8m memchunk.veu0=4m"
#
# Bus options
@ -373,6 +371,7 @@ CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_HIBERNATION is not set
CONFIG_PM_RUNTIME=y
CONFIG_PM_OPS=y
# CONFIG_CPU_IDLE is not set
CONFIG_NET=y
@ -380,7 +379,6 @@ CONFIG_NET=y
# Networking options
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@ -445,7 +443,45 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
CONFIG_IRDA=y
#
# IrDA protocols
#
# CONFIG_IRLAN is not set
# CONFIG_IRCOMM is not set
# CONFIG_IRDA_ULTRA is not set
#
# IrDA options
#
# CONFIG_IRDA_CACHE_LAST_LSAP is not set
# CONFIG_IRDA_FAST_RR is not set
# CONFIG_IRDA_DEBUG is not set
#
# Infrared-port device drivers
#
#
# SIR device drivers
#
# CONFIG_IRTTY_SIR is not set
#
# Dongle support
#
CONFIG_SH_SIR=y
# CONFIG_KINGSUN_DONGLE is not set
# CONFIG_KSDAZZLE_DONGLE is not set
# CONFIG_KS959_DONGLE is not set
#
# FIR device drivers
#
# CONFIG_USB_IRDA is not set
# CONFIG_SIGMATEL_FIR is not set
# CONFIG_MCS_FIR is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
@ -556,6 +592,7 @@ CONFIG_MTD_NAND_IDS=y
# CONFIG_MTD_NAND_NANDSIM is not set
# CONFIG_MTD_NAND_PLATFORM is not set
# CONFIG_MTD_ALAUDA is not set
# CONFIG_MTD_NAND_SH_FLCTL is not set
# CONFIG_MTD_ONENAND is not set
#
@ -597,6 +634,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
@ -616,6 +654,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@ -768,7 +807,29 @@ CONFIG_KEYBOARD_SH_KEYSC=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
CONFIG_INPUT_TOUCHSCREEN=y
# CONFIG_TOUCHSCREEN_ADS7846 is not set
# CONFIG_TOUCHSCREEN_AD7877 is not set
# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
# CONFIG_TOUCHSCREEN_AD7879 is not set
# CONFIG_TOUCHSCREEN_DYNAPRO is not set
# CONFIG_TOUCHSCREEN_EETI is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
# CONFIG_TOUCHSCREEN_MCS5000 is not set
# CONFIG_TOUCHSCREEN_MTOUCH is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MK712 is not set
# CONFIG_TOUCHSCREEN_PENMOUNT is not set
# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
CONFIG_TOUCHSCREEN_TSC2007=y
# CONFIG_TOUCHSCREEN_W90X900 is not set
# CONFIG_INPUT_MISC is not set
#
@ -802,10 +863,10 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_TIMERIOMEM is not set
@ -830,6 +891,7 @@ CONFIG_I2C_HELPER_AUTO=y
# CONFIG_I2C_OCORES is not set
CONFIG_I2C_SH_MOBILE=y
# CONFIG_I2C_SIMTEC is not set
# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@ -843,15 +905,9 @@ CONFIG_I2C_SH_MOBILE=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
CONFIG_SPI=y
CONFIG_SPI_MASTER=y
@ -882,13 +938,16 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
# CONFIG_GPIO_IT8761E is not set
#
# I2C GPIO expanders:
#
# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX732X is not set
# CONFIG_GPIO_PCA953X is not set
# CONFIG_GPIO_PCF857X is not set
# CONFIG_GPIO_ADP5588 is not set
#
# PCI GPIO expanders:
@ -919,23 +978,26 @@ CONFIG_SSB_POSSIBLE=y
#
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
CONFIG_MFD_CORE=y
# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SH_MOBILE_SDHI is not set
CONFIG_MFD_SH_MOBILE_SDHI=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_HTC_I2CPLD is not set
# CONFIG_TPS65010 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_MFD_88PM8607 is not set
# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
CONFIG_MEDIA_SUPPORT=y
@ -985,10 +1047,10 @@ CONFIG_SOC_CAMERA=y
# CONFIG_SOC_CAMERA_MT9M001 is not set
# CONFIG_SOC_CAMERA_MT9M111 is not set
# CONFIG_SOC_CAMERA_MT9T031 is not set
# CONFIG_SOC_CAMERA_MT9T112 is not set
CONFIG_SOC_CAMERA_MT9T112=y
# CONFIG_SOC_CAMERA_MT9V022 is not set
# CONFIG_SOC_CAMERA_RJ54N1 is not set
# CONFIG_SOC_CAMERA_TW9910 is not set
CONFIG_SOC_CAMERA_TW9910=y
# CONFIG_SOC_CAMERA_PLATFORM is not set
# CONFIG_SOC_CAMERA_OV772X is not set
# CONFIG_SOC_CAMERA_OV9640 is not set
@ -1001,6 +1063,7 @@ CONFIG_RADIO_ADAPTERS=y
# CONFIG_RADIO_SI470X is not set
# CONFIG_USB_MR800 is not set
# CONFIG_RADIO_TEA5764 is not set
# CONFIG_RADIO_SAA7706H is not set
# CONFIG_RADIO_TEF6862 is not set
# CONFIG_DAB is not set
@ -1034,6 +1097,7 @@ CONFIG_FB_DEFERRED_IO=y
#
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_SH_MOBILE_LCDC=y
# CONFIG_FB_TMIO is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
@ -1062,7 +1126,46 @@ CONFIG_LOGO=y
# CONFIG_LOGO_SUPERH_MONO is not set
# CONFIG_LOGO_SUPERH_VGA16 is not set
CONFIG_LOGO_SUPERH_CLUT224=y
# CONFIG_SOUND is not set
CONFIG_SOUND=y
CONFIG_SOUND_OSS_CORE=y
CONFIG_SOUND_OSS_CORE_PRECLAIM=y
CONFIG_SND=y
CONFIG_SND_TIMER=y
CONFIG_SND_PCM=y
CONFIG_SND_JACK=y
CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQ_DUMMY=y
CONFIG_SND_OSSEMUL=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
CONFIG_SND_PCM_OSS_PLUGINS=y
# CONFIG_SND_SEQUENCER_OSS is not set
# CONFIG_SND_DYNAMIC_MINORS is not set
CONFIG_SND_SUPPORT_OLD_API=y
CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
# CONFIG_SND_RAWMIDI_SEQ is not set
# CONFIG_SND_OPL3_LIB_SEQ is not set
# CONFIG_SND_OPL4_LIB_SEQ is not set
# CONFIG_SND_SBAWE_SEQ is not set
# CONFIG_SND_EMU10K1_SEQ is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_SPI is not set
CONFIG_SND_SUPERH=y
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
#
# SoC Audio support for SuperH
#
CONFIG_SND_SOC_SH4_FSI=y
# CONFIG_SND_FSI_AK4642 is not set
CONFIG_SND_FSI_DA7210=y
CONFIG_SND_SOC_I2C_AND_SPI=y
# CONFIG_SND_SOC_ALL_CODECS is not set
CONFIG_SND_SOC_DA7210=y
# CONFIG_SOUND_PRIME is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HIDRAW is not set
@ -1077,6 +1180,7 @@ CONFIG_USB_HID=y
#
# Special HID drivers
#
# CONFIG_HID_3M_PCT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
@ -1091,12 +1195,16 @@ CONFIG_USB_HID=y
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
# CONFIG_HID_MOSART is not set
# CONFIG_HID_MONTEREY is not set
# CONFIG_HID_NTRIG is not set
# CONFIG_HID_ORTEK is not set
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
# CONFIG_HID_QUANTA is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
# CONFIG_HID_STANTUM is not set
# CONFIG_HID_SUNPLUS is not set
# CONFIG_HID_GREENASIA is not set
# CONFIG_HID_SMARTJOYPLUS is not set
@ -1136,6 +1244,7 @@ CONFIG_USB_MON=y
# CONFIG_USB_SL811_HCD is not set
CONFIG_USB_R8A66597_HCD=y
# CONFIG_USB_HWA_HCD is not set
# CONFIG_USB_GADGET_MUSB_HDRC is not set
#
# USB Device Class drivers
@ -1188,7 +1297,6 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@ -1200,8 +1308,45 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
CONFIG_USB_GADGET=y
# CONFIG_USB_GADGET_DEBUG_FILES is not set
# CONFIG_USB_GADGET_DEBUG_FS is not set
CONFIG_USB_GADGET_VBUS_DRAW=2
CONFIG_USB_GADGET_SELECTED=y
# CONFIG_USB_GADGET_AT91 is not set
# CONFIG_USB_GADGET_ATMEL_USBA is not set
# CONFIG_USB_GADGET_FSL_USB2 is not set
# CONFIG_USB_GADGET_LH7A40X is not set
# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_PXA25X is not set
CONFIG_USB_GADGET_R8A66597=y
CONFIG_USB_R8A66597=y
# CONFIG_USB_GADGET_PXA27X is not set
# CONFIG_USB_GADGET_S3C_HSOTG is not set
# CONFIG_USB_GADGET_IMX is not set
# CONFIG_USB_GADGET_S3C2410 is not set
# CONFIG_USB_GADGET_M66592 is not set
# CONFIG_USB_GADGET_AMD5536UDC is not set
# CONFIG_USB_GADGET_FSL_QE is not set
# CONFIG_USB_GADGET_CI13XXX is not set
# CONFIG_USB_GADGET_NET2280 is not set
# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LANGWELL is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
CONFIG_USB_GADGET_DUALSPEED=y
# CONFIG_USB_ZERO is not set
# CONFIG_USB_AUDIO is not set
# CONFIG_USB_ETH is not set
# CONFIG_USB_GADGETFS is not set
CONFIG_USB_FILE_STORAGE=m
# CONFIG_USB_FILE_STORAGE_TEST is not set
# CONFIG_USB_MASS_STORAGE is not set
# CONFIG_USB_G_SERIAL is not set
# CONFIG_USB_MIDI_GADGET is not set
# CONFIG_USB_G_PRINTER is not set
# CONFIG_USB_CDC_COMPOSITE is not set
# CONFIG_USB_G_NOKIA is not set
# CONFIG_USB_G_MULTI is not set
#
# OTG and related infrastructure
@ -1224,10 +1369,8 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# MMC/SD/SDIO Host Controller Drivers
#
# CONFIG_MMC_SDHCI is not set
# CONFIG_MMC_AT91 is not set
# CONFIG_MMC_ATMELMCI is not set
CONFIG_MMC_SPI=y
# CONFIG_MMC_TMIO is not set
CONFIG_MMC_TMIO=y
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
@ -1253,10 +1396,10 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_DS1374 is not set
# CONFIG_RTC_DRV_DS1672 is not set
# CONFIG_RTC_DRV_MAX6900 is not set
# CONFIG_RTC_DRV_RS5C372 is not set
CONFIG_RTC_DRV_RS5C372=y
# CONFIG_RTC_DRV_ISL1208 is not set
# CONFIG_RTC_DRV_X1205 is not set
CONFIG_RTC_DRV_PCF8563=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_BQ32K is not set
@ -1303,8 +1446,6 @@ CONFIG_RTC_DRV_PCF8563=y
CONFIG_UIO=y
# CONFIG_UIO_PDRV is not set
CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_UIO_SMX is not set
# CONFIG_UIO_SERCOS3 is not set
#
# TI VLYNQ
@ -1390,6 +1531,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_UBIFS_FS is not set
# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@ -1418,6 +1560,7 @@ CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
@ -1487,6 +1630,7 @@ CONFIG_DEBUG_FS=y
CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LKDTM is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_HAVE_FUNCTION_TRACER=y
@ -1618,7 +1762,7 @@ CONFIG_CRYPTO_HW=y
#
CONFIG_BITREVERSE=y
CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
CONFIG_CRC_CCITT=y
# CONFIG_CRC16 is not set
CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y

View File

@ -211,7 +211,9 @@ extern void __kernel_vsyscall;
#define VSYSCALL_AUX_ENT \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
else \
NEW_AUX_ENT(AT_IGNORE, 0);
#else
#define VSYSCALL_AUX_ENT
#endif /* CONFIG_VSYSCALL */
@ -219,7 +221,7 @@ extern void __kernel_vsyscall;
#ifdef CONFIG_SH_FPU
#define FPU_AUX_ENT NEW_AUX_ENT(AT_FPUCW, FPSCR_INIT)
#else
#define FPU_AUX_ENT
#define FPU_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0)
#endif
extern int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
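The AT_IGNORE substitutions above keep the ELF auxiliary vector at a fixed number of entries whether or not the vDSO and FPU words are available; consumers simply skip AT_IGNORE slots. A minimal stand-alone sketch of that idea, not part of this commit (the AT_* constants match <linux/auxvec.h>, but the table contents and addresses are made up):
#include <stdio.h>
#define AT_IGNORE        1   /* entry should be skipped by the consumer */
#define AT_FPUCW        18   /* FPU control word */
#define AT_SYSINFO_EHDR 33   /* address of the vDSO ELF header */
struct auxv_ent { unsigned long a_type, a_val; };
int main(void)
{
	int vdso_enabled = 0, have_fpu = 1;
	/* An absent feature still occupies a slot, padded with AT_IGNORE. */
	struct auxv_ent auxv[] = {
		{ vdso_enabled ? AT_SYSINFO_EHDR : AT_IGNORE,
		  vdso_enabled ? 0x7fff1000UL : 0UL },
		{ have_fpu ? AT_FPUCW : AT_IGNORE,
		  have_fpu ? 0x00080000UL : 0UL },
	};
	printf("%zu entries either way\n", sizeof(auxv) / sizeof(auxv[0]));
	return 0;
}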

View File

@ -30,6 +30,8 @@
#define MMUCR_URB 0x00FC0000
#define MMUCR_URB_SHIFT 18
#define MMUCR_URB_NENTRIES 64
#define MMUCR_URC 0x0000FC00
#define MMUCR_URC_SHIFT 10
#if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
#define MMUCR_SE (1 << 4)

View File

@ -48,7 +48,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
return -ENODEV;
cpus_allowed = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
set_cpus_allowed_ptr(current, cpumask_of(cpu));
BUG_ON(smp_processor_id() != cpu);
@ -66,7 +66,7 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
freqs.flags = 0;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
set_cpus_allowed(current, cpus_allowed);
set_cpus_allowed_ptr(current, &cpus_allowed);
clk_set_rate(cpuclk, freq);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

View File

@ -9,6 +9,7 @@
* for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/dwarf.h>
#ifdef CONFIG_DWARF_UNWINDER
@ -52,3 +53,5 @@ void *return_address(unsigned int depth)
}
#endif
EXPORT_SYMBOL_GPL(return_address);

View File

@ -69,6 +69,7 @@ asmlinkage void __cpuinit start_secondary(void)
unsigned int cpu;
struct mm_struct *mm = &init_mm;
enable_mmu();
atomic_inc(&mm->mm_count);
atomic_inc(&mm->mm_users);
current->active_mm = mm;

View File

@ -77,3 +77,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
back_to_cached();
}
void local_flush_tlb_all(void)
{
unsigned long flags, status;
int i;
/*
* Flush all the TLB.
*/
local_irq_save(flags);
jump_to_uncached();
status = __raw_readl(MMUCR);
status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
if (status == 0)
status = MMUCR_URB_NENTRIES;
for (i = 0; i < status; i++)
__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
for (i = 0; i < 4; i++)
__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
back_to_cached();
ctrl_barrier();
local_irq_restore(flags);
}

View File

@ -77,3 +77,22 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
for (i = 0; i < ways; i++)
__raw_writel(data, addr + (i << 8));
}
void local_flush_tlb_all(void)
{
unsigned long flags, status;
/*
* Flush all the TLB.
*
* Write to the MMU control register's bit:
* TF-bit for SH-3, TI-bit for SH-4.
* It's the same position, bit #2.
*/
local_irq_save(flags);
status = __raw_readl(MMUCR);
status |= 0x04;
__raw_writel(status, MMUCR);
ctrl_barrier();
local_irq_restore(flags);
}

View File

@ -80,3 +80,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
__raw_writel(data, addr);
back_to_cached();
}
void local_flush_tlb_all(void)
{
unsigned long flags, status;
int i;
/*
* Flush all the TLB.
*/
local_irq_save(flags);
jump_to_uncached();
status = __raw_readl(MMUCR);
status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
if (status == 0)
status = MMUCR_URB_NENTRIES;
for (i = 0; i < status; i++)
__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
for (i = 0; i < 4; i++)
__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
back_to_cached();
ctrl_barrier();
local_irq_restore(flags);
}

View File

@ -24,13 +24,9 @@ void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
local_irq_save(flags);
/* Load the entry into the TLB */
__update_tlb(vma, addr, pte);
/* ... and wire it up. */
status = __raw_readl(MMUCR);
urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
status &= ~MMUCR_URB;
status &= ~MMUCR_URC;
/*
* Make sure we're not trying to wire the last TLB entry slot.
@ -39,7 +35,23 @@ void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
urb = urb % MMUCR_URB_NENTRIES;
/*
* Insert this entry into the highest non-wired TLB slot (via
* the URC field).
*/
status |= (urb << MMUCR_URC_SHIFT);
__raw_writel(status, MMUCR);
ctrl_barrier();
/* Load the entry into the TLB */
__update_tlb(vma, addr, pte);
/* ... and wire it up. */
status = __raw_readl(MMUCR);
status &= ~MMUCR_URB;
status |= (urb << MMUCR_URB_SHIFT);
__raw_writel(status, MMUCR);
ctrl_barrier();

View File

@ -119,31 +119,3 @@ void local_flush_tlb_mm(struct mm_struct *mm)
local_irq_restore(flags);
}
}
void local_flush_tlb_all(void)
{
unsigned long flags, status;
int i;
/*
* Flush all the TLB.
*/
local_irq_save(flags);
jump_to_uncached();
status = __raw_readl(MMUCR);
status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
if (status == 0)
status = MMUCR_URB_NENTRIES;
for (i = 0; i < status; i++)
__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
for (i = 0; i < 4; i++)
__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
back_to_cached();
ctrl_barrier();
local_irq_restore(flags);
}

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.33
# Wed Mar 3 02:54:29 2010
# Linux kernel version: 2.6.34-rc3
# Sat Apr 3 15:49:56 2010
#
CONFIG_64BIT=y
CONFIG_SPARC=y
@ -23,6 +23,7 @@ CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_MMU=y
CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@ -439,6 +440,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
@ -511,6 +513,7 @@ CONFIG_BLK_DEV_IDEDMA=y
#
# SCSI device support
#
CONFIG_SCSI_MOD=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@ -888,6 +891,7 @@ CONFIG_SERIAL_SUNHV=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@ -935,6 +939,7 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@ -948,15 +953,9 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
#
# Miscellaneous I2C Chip support
#
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@ -982,10 +981,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
@ -1052,18 +1052,21 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_MFD_88PM8607 is not set
# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@ -1113,6 +1116,7 @@ CONFIG_FB_FFB=y
# CONFIG_FB_LEO is not set
CONFIG_FB_XVR500=y
CONFIG_FB_XVR2500=y
CONFIG_FB_XVR1000=y
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
@ -1430,7 +1434,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@ -1443,7 +1446,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@ -1610,6 +1612,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@ -1624,6 +1627,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set

View File

@ -46,6 +46,81 @@ stack_trace_flush:
nop
.size stack_trace_flush,.-stack_trace_flush
#ifdef CONFIG_PERF_EVENTS
.globl perf_arch_fetch_caller_regs
.type perf_arch_fetch_caller_regs,#function
perf_arch_fetch_caller_regs:
/* We always read the %pstate into %o5 since we will use
* that to construct a fake %tstate to store into the regs.
*/
rdpr %pstate, %o5
brz,pn %o2, 50f
mov %o2, %g7
/* Turn off interrupts while we walk around the register
* window by hand.
*/
wrpr %o5, PSTATE_IE, %pstate
/* The %canrestore tells us how many register windows are
* still live in the chip above us, past that we have to
* walk the frame as saved on the stack. We stash away
* the %cwp in %g1 so we can return back to the original
* register window.
*/
rdpr %cwp, %g1
rdpr %canrestore, %g2
sub %g1, 1, %g3
/* We have the skip count in %g7, if it hits zero then
* %fp/%i7 are the registers we need. Otherwise if our
* %canrestore count maintained in %g2 hits zero we have
* to start traversing the stack.
*/
10: brz,pn %g2, 4f
sub %g2, 1, %g2
wrpr %g3, %cwp
subcc %g7, 1, %g7
bne,pt %xcc, 10b
sub %g3, 1, %g3
/* We found the values we need in the cpu's register
* windows.
*/
mov %fp, %g3
ba,pt %xcc, 3f
mov %i7, %g2
50: mov %fp, %g3
ba,pt %xcc, 2f
mov %i7, %g2
/* We hit the end of the valid register windows in the
* cpu, start traversing the stack frame.
*/
4: mov %fp, %g3
20: ldx [%g3 + STACK_BIAS + RW_V9_I7], %g2
subcc %g7, 1, %g7
bne,pn %xcc, 20b
ldx [%g3 + STACK_BIAS + RW_V9_I6], %g3
/* Restore the current register window position and
* re-enable interrupts.
*/
3: wrpr %g1, %cwp
wrpr %o5, %pstate
2: stx %g3, [%o0 + PT_V9_FP]
sllx %o5, 8, %o5
stx %o5, [%o0 + PT_V9_TSTATE]
stx %g2, [%o0 + PT_V9_TPC]
add %g2, 4, %g2
retl
stx %g2, [%o0 + PT_V9_TNPC]
.size perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_SMP
.globl hard_smp_processor_id
.type hard_smp_processor_id,#function

View File

@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
*k++ = regs->u_regs[pos++];
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
return -EFAULT;
@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
put_user(reg, u++))
@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
regs->u_regs[pos++] = *k++;
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
return -EFAULT;
@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
put_user(reg, &reg_window[pos++]))

View File

@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
*k++ = regs->u_regs[pos++];
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
regs->u_regs[pos++] = *k++;
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||

View File

@ -2117,7 +2117,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
"node=%d entry=%lu/%lu\n", start, block, nr,
node,
addr >> VMEMMAP_CHUNK_SHIFT,
VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
VMEMMAP_SIZE);
}
}
return 0;

View File

@ -29,6 +29,7 @@
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
static u64 perf_event_mask __read_mostly;
@ -159,7 +160,7 @@ struct x86_pmu {
struct perf_event *event);
struct event_constraint *event_constraints;
void (*cpu_prepare)(int cpu);
int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
@ -1334,11 +1335,12 @@ static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
int ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
x86_pmu.cpu_prepare(cpu);
ret = x86_pmu.cpu_prepare(cpu);
break;
case CPU_STARTING:
@ -1351,6 +1353,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
x86_pmu.cpu_dying(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
@ -1360,7 +1363,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
break;
}
return NOTIFY_OK;
return ret;
}
static void __init pmu_check_apic(void)
@ -1629,14 +1632,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return len;
}
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
#ifdef CONFIG_COMPAT
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
unsigned long bytes;
/* 32-bit process in 64-bit kernel. */
struct stack_frame_ia32 frame;
const void __user *fp;
bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
if (!test_thread_flag(TIF_IA32))
return 0;
return bytes == sizeof(*frame);
fp = compat_ptr(regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if (fp < compat_ptr(regs->sp))
break;
callchain_store(entry, frame.return_address);
fp = compat_ptr(frame.next_frame);
}
return 1;
}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
return 0;
}
#endif
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@ -1652,11 +1683,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->ip);
if (perf_callchain_user32(regs, entry))
return;
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
if (!copy_stack_frame(fp, &frame))
bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
if (bytes != sizeof(frame))
break;
if ((unsigned long)fp < regs->sp)
@ -1703,7 +1739,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return entry;
}
#ifdef CONFIG_EVENT_TRACING
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
regs->ip = ip;
@ -1715,4 +1750,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
regs->cs = __KERNEL_CS;
local_save_flags(regs->flags);
}
#endif
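The compat path added in this file walks a 32-bit user stack by following saved frame pointers: copy one {next_frame, return_address} pair with copy_from_user_nmi(), check that the frame does not sit below the stack pointer, record the return address, and follow next_frame until the copy fails or PERF_MAX_STACK_DEPTH is reached. A stand-alone user-space sketch of that loop, not part of this commit (the fake stack and the safe_copy() helper are invented stand-ins for the kernel primitives):
#include <stdint.h>
#include <stdio.h>
#include <string.h>
struct frame32 {
	uint32_t next_frame;      /* saved caller frame pointer */
	uint32_t return_address;  /* saved return address */
};
#define MAX_DEPTH 64              /* stands in for PERF_MAX_STACK_DEPTH */
/* Simulated 32-bit user stack: chained frames, "addresses" are indices * 8. */
static struct frame32 fake_stack[] = {
	{  8, 0x08048111u },
	{ 16, 0x08048222u },
	{ 0xffffffffu, 0x08048333u },  /* next_frame points outside the stack, ends the walk */
};
/* Stands in for copy_from_user_nmi(): fails for anything outside the fake stack. */
static int safe_copy(struct frame32 *dst, uint32_t src)
{
	if (src % 8 || src / 8 >= sizeof(fake_stack) / sizeof(fake_stack[0]))
		return 0;
	memcpy(dst, &fake_stack[src / 8], sizeof(*dst));
	return 1;
}
int main(void)
{
	uint32_t fp = 0, sp = 0, out[MAX_DEPTH];
	size_t depth = 0;
	while (depth < MAX_DEPTH) {
		struct frame32 frame;
		if (!safe_copy(&frame, fp))
			break;                  /* unreadable frame: stop */
		if (fp < sp)
			break;                  /* frame below the stack pointer: bogus chain */
		out[depth++] = frame.return_address;
		fp = frame.next_frame;          /* step to the caller's frame */
	}
	for (size_t i = 0; i < depth; i++)
		printf("return address 0x%08x\n", (unsigned)out[i]);
	return 0;
}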

View File

@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
return (hwc->config & 0xe0) == 0xe0;
}
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
struct amd_nb *nb = cpuc->amd_nb;
return nb && nb->nb_id != -1;
}
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
/*
* only care about NB events
*/
if (!(nb && amd_is_nb_event(hwc)))
if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
return;
/*
@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
/*
* if not NB event or no NB, then no constraints
*/
if (!(nb && amd_is_nb_event(hwc)))
if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
return &unconstrained;
/*
@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
return nb;
}
static void amd_pmu_cpu_online(int cpu)
static int amd_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpu1, *cpu2;
struct amd_nb *nb = NULL;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
WARN_ON_ONCE(cpuc->amd_nb);
if (boot_cpu_data.x86_max_cores < 2)
return NOTIFY_OK;
cpuc->amd_nb = amd_alloc_nb(cpu, -1);
if (!cpuc->amd_nb)
return NOTIFY_BAD;
return NOTIFY_OK;
}
static void amd_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct amd_nb *nb;
int i, nb_id;
if (boot_cpu_data.x86_max_cores < 2)
return;
/*
* function may be called too early in the
* boot process, in which case nb_id is bogus
*/
nb_id = amd_get_nb_id(cpu);
if (nb_id == BAD_APICID)
return;
cpu1 = &per_cpu(cpu_hw_events, cpu);
cpu1->amd_nb = NULL;
WARN_ON_ONCE(nb_id == BAD_APICID);
raw_spin_lock(&amd_nb_lock);
for_each_online_cpu(i) {
cpu2 = &per_cpu(cpu_hw_events, i);
nb = cpu2->amd_nb;
if (!nb)
nb = per_cpu(cpu_hw_events, i).amd_nb;
if (WARN_ON_ONCE(!nb))
continue;
if (nb->nb_id == nb_id)
goto found;
if (nb->nb_id == nb_id) {
kfree(cpuc->amd_nb);
cpuc->amd_nb = nb;
break;
}
}
nb = amd_alloc_nb(cpu, nb_id);
if (!nb) {
pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
raw_spin_unlock(&amd_nb_lock);
return;
}
found:
nb->refcnt++;
cpu1->amd_nb = nb;
cpuc->amd_nb->nb_id = nb_id;
cpuc->amd_nb->refcnt++;
raw_spin_unlock(&amd_nb_lock);
}
static void amd_pmu_cpu_offline(int cpu)
static void amd_pmu_cpu_dead(int cpu)
{
struct cpu_hw_events *cpuhw;
@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
raw_spin_lock(&amd_nb_lock);
if (cpuhw->amd_nb) {
if (--cpuhw->amd_nb->refcnt == 0)
kfree(cpuhw->amd_nb);
struct amd_nb *nb = cpuhw->amd_nb;
if (nb->nb_id == -1 || --nb->refcnt == 0)
kfree(nb);
cpuhw->amd_nb = NULL;
}
@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
.get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,
.cpu_prepare = amd_pmu_cpu_online,
.cpu_dead = amd_pmu_cpu_offline,
.cpu_prepare = amd_pmu_cpu_prepare,
.cpu_starting = amd_pmu_cpu_starting,
.cpu_dead = amd_pmu_cpu_dead,
};
static __init int amd_pmu_init(void)
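The reworked hotplug callbacks above split the northbridge bookkeeping into three steps: cpu_prepare allocates a placeholder with nb_id == -1, cpu_starting either adopts an already-registered structure carrying the same nb_id (freeing the placeholder) or claims the placeholder for that id, and cpu_dead drops the reference, freeing the structure once the last sharer is gone or it was never claimed. A stand-alone sketch of that lifecycle, not part of this commit (plain C, hypothetical names, no locking):
#include <stdlib.h>
#define NCPU 8
struct nb { int nb_id; int refcnt; };
static struct nb *per_cpu_nb[NCPU];
/* cpu_prepare step: allocate a placeholder that no node owns yet. */
static int nb_prepare(int cpu)
{
	struct nb *nb = calloc(1, sizeof(*nb));
	if (!nb)
		return -1;
	nb->nb_id = -1;
	per_cpu_nb[cpu] = nb;
	return 0;
}
/* cpu_starting step: share an existing structure for nb_id, or claim ours. */
static void nb_starting(int cpu, int nb_id)
{
	for (int i = 0; i < NCPU; i++) {
		struct nb *other = per_cpu_nb[i];
		if (i == cpu || !other || other->nb_id != nb_id)
			continue;
		free(per_cpu_nb[cpu]);          /* placeholder unused, adopt the shared one */
		per_cpu_nb[cpu] = other;
		break;
	}
	per_cpu_nb[cpu]->nb_id = nb_id;         /* claims the placeholder; no-op when adopting */
	per_cpu_nb[cpu]->refcnt++;
}
/* cpu_dead step: drop our reference; free on last user or an unclaimed placeholder. */
static void nb_dead(int cpu)
{
	struct nb *nb = per_cpu_nb[cpu];
	if (nb && (nb->nb_id == -1 || --nb->refcnt == 0))
		free(nb);
	per_cpu_nb[cpu] = NULL;
}
int main(void)
{
	nb_prepare(0);
	nb_starting(0, 3);      /* first CPU on node 3 keeps its placeholder */
	nb_prepare(1);
	nb_starting(1, 3);      /* second CPU frees its placeholder and shares node 3 */
	nb_dead(1);             /* refcnt 2 -> 1, structure stays */
	nb_dead(0);             /* last user gone, structure freed */
	return 0;
}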

View File

@ -30,6 +30,11 @@ struct stack_frame {
unsigned long return_address;
};
struct stack_frame_ia32 {
u32 next_frame;
u32 return_address;
};
static inline unsigned long rewind_frame_pointer(int n)
{
struct stack_frame *frame;

View File

@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>
#include <asm/setup.h>
#include <asm/sections.h>
@ -44,9 +45,10 @@ void __init i386_start_kernel(void)
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
/* Assume only the end is not page aligned */
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
u64 ramdisk_end = ramdisk_image + ramdisk_size;
u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif

View File

@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data)
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
/* Assume only the end is not page aligned */
unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif

View File

@ -618,8 +618,8 @@ int kgdb_arch_init(void)
* portion of kgdb because this operation requires mutexs to
* complete.
*/
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)kgdb_arch_init;
attr.type = PERF_TYPE_BREAKPOINT;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;

View File

@ -313,16 +313,17 @@ static void __init reserve_brk(void)
#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{
/* Assume only the end is not page aligned */
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
u64 area_size = PAGE_ALIGN(ramdisk_size);
u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
u64 ramdisk_here;
unsigned long slop, clen, mapaddr;
char *p, *q;
/* We need to move the initrd down into lowmem */
ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
PAGE_SIZE);
if (ramdisk_here == -1ULL)
@ -331,7 +332,7 @@ static void __init relocate_initrd(void)
/* Note: this includes all the lowmem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
reserve_early(ramdisk_here, ramdisk_here + area_size,
"NEW RAMDISK");
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
@ -375,9 +376,10 @@ static void __init relocate_initrd(void)
static void __init reserve_initrd(void)
{
/* Assume only the end is not page aligned */
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
u64 ramdisk_end = ramdisk_image + ramdisk_size;
u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
if (!boot_params.hdr.type_of_loader ||

View File

@ -243,8 +243,6 @@ static void __cpuinit smp_callin(void)
end_local_APIC_setup();
map_cpu_to_logical_apicid();
notify_cpu_starting(cpuid);
/*
* Need to setup vector mappings before we enable interrupts.
*/
@ -265,6 +263,8 @@ static void __cpuinit smp_callin(void)
*/
smp_store_cpu_info(cpuid);
notify_cpu_starting(cpuid);
/*
* Allow the master to continue.
*/

View File

@ -291,8 +291,8 @@ SECTIONS
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
__smp_locks_end = .;
. = ALIGN(PAGE_SIZE);
__smp_locks_end = .;
}
#ifdef CONFIG_X86_64

View File

@ -332,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr)
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr = begin;
unsigned long addr;
unsigned long begin_aligned, end_aligned;
if (addr >= end)
/* Make sure boundaries are page aligned */
begin_aligned = PAGE_ALIGN(begin);
end_aligned = end & PAGE_MASK;
if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
begin = begin_aligned;
end = end_aligned;
}
if (begin >= end)
return;
addr = begin;
/*
* If debugging page accesses then do not free this memory but
* mark them not present - any buggy init-section access will
@ -344,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
begin, PAGE_ALIGN(end));
begin, end);
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
/*
@ -359,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *)(addr & ~(PAGE_SIZE-1)),
POISON_FREE_INITMEM, PAGE_SIZE);
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
@ -377,6 +388,15 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_init_pages("initrd memory", start, end);
/*
* end may not be page aligned, and we can not align it here:
* the decompressor could be confused by an aligned initrd_end.
* The partial end page has already been reserved by
* - i386_start_kernel()
* - x86_64_start_kernel()
* - relocate_initrd()
* so it is safe to PAGE_ALIGN(end) here and free that partial page as well.
*/
free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif
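The initrd hunks in this series (i386_start_kernel, x86_64_start_reservations, relocate_initrd, reserve_initrd and free_initrd_mem above) all rely on the same rounding rule: the image's end address may fall in the middle of a page, so reserving and freeing must both cover up to PAGE_ALIGN(end). A tiny stand-alone illustration of that arithmetic, assuming a 4 KiB page (the addresses are made up):
#include <stdio.h>
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
int main(void)
{
	unsigned long ramdisk_image = 0x37a00000UL;   /* made-up load address */
	unsigned long ramdisk_size  = 0x001fe123UL;   /* made up, not a multiple of the page size */
	unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
	/* Only the aligned end covers the partial last page. */
	printf("end          = %#lx\n", ramdisk_end);
	printf("PAGE_ALIGN() = %#lx (+%lu bytes of partial page)\n",
	       PAGE_ALIGN(ramdisk_end), PAGE_ALIGN(ramdisk_end) - ramdisk_end);
	return 0;
}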

View File

@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
list_del_init(&tty->tty_files);
file_list_unlock();
put_pid(tty->pgrp);
put_pid(tty->session);
free_tty_struct(tty);
}

View File

@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}

View File

@ -708,15 +708,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
/* perform the basic check for the detailed timing */
if (mode->hsync_end > mode->htotal ||
mode->vsync_end > mode->vtotal) {
drm_mode_destroy(dev, mode);
DRM_DEBUG_KMS("Incorrect detailed timing. "
"Sync is beyond the blank.\n");
return NULL;
}
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;

View File

@ -284,6 +284,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_on(struct fb_info *info)

View File

@ -141,14 +141,16 @@ int drm_open(struct inode *inode, struct file *filp)
spin_unlock(&dev->count_lock);
}
out:
mutex_lock(&dev->struct_mutex);
if (minor->type == DRM_MINOR_LEGACY) {
BUG_ON((dev->dev_mapping != NULL) &&
(dev->dev_mapping != inode->i_mapping));
if (dev->dev_mapping == NULL)
dev->dev_mapping = inode->i_mapping;
if (!retcode) {
mutex_lock(&dev->struct_mutex);
if (minor->type == DRM_MINOR_LEGACY) {
if (dev->dev_mapping == NULL)
dev->dev_mapping = inode->i_mapping;
else if (dev->dev_mapping != inode->i_mapping)
retcode = -ENODEV;
}
mutex_unlock(&dev->struct_mutex);
}
mutex_unlock(&dev->struct_mutex);
return retcode;
}

View File

@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_dp.o nouveau_grctx.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \

View File

@ -5210,6 +5210,21 @@ divine_connector_type(struct nvbios *bios, int index)
return type;
}
static void
apply_dcb_connector_quirks(struct nvbios *bios, int idx)
{
struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
struct drm_device *dev = bios->dev;
/* Gigabyte NX85T */
if ((dev->pdev->device == 0x0421) &&
(dev->pdev->subsystem_vendor == 0x1458) &&
(dev->pdev->subsystem_device == 0x344c)) {
if (cte->type == DCB_CONNECTOR_HDMI_1)
cte->type = DCB_CONNECTOR_DVI_I;
}
}
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios)
entry = conntab + conntab[1];
cte = &ct->entry[0];
for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
cte->index = i;
if (conntab[3] == 2)
cte->entry = ROM16(entry[0]);
else
cte->entry = ROM32(entry[0]);
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index = (cte->entry & 0x00000f00) >> 8;
cte->index2 = (cte->entry & 0x00000f00) >> 8;
switch (cte->entry & 0x00033000) {
case 0x00001000:
cte->gpio_tag = 0x07;
@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios)
if (cte->type == 0xff)
continue;
apply_dcb_connector_quirks(bios, i);
NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
i, cte->entry, cte->type, cte->index, cte->gpio_tag);
@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios)
break;
default:
cte->type = divine_connector_type(bios, cte->index);
NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
break;
}
if (nouveau_override_conntype) {
int type = divine_connector_type(bios, cte->index);
if (type != cte->type)
NV_WARN(dev, " -> type 0x%02x\n", cte->type);
}
}
}

View File

@ -72,9 +72,10 @@ enum dcb_connector_type {
};
struct dcb_connector_table_entry {
uint8_t index;
uint32_t entry;
enum dcb_connector_type type;
uint8_t index;
uint8_t index2;
uint8_t gpio_tag;
};

View File

@ -440,8 +440,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
TTM_PL_FLAG_SYSTEM);
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
break;
default:
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);

View File

@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector)
detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
if (!nv_encoder)
if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
if (nv_encoder) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);

View File

@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
DRM_MEMORYBARRIER();
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
chan->dma.ib_free--;
}

View File

@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
int nouveau_override_conntype = 0;
module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
NV_INFO(dev, "Disabling fbcon acceleration...\n");
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;

View File

@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
extern int nouveau_fbpercrtc;
extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
@ -688,6 +689,7 @@ extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
extern int nouveau_override_conntype;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);

View File

@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value,
#define nouveau_print_bitfield_names(val, namelist) \
nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
struct nouveau_enum_names {
uint32_t value;
const char *name;
};
static void
nouveau_print_enum_names_(uint32_t value,
const struct nouveau_enum_names *namelist,
const int namelist_len)
{
/*
* Caller must have already printed the KERN_* log level for us.
* Also the caller is responsible for adding the newline.
*/
int i;
for (i = 0; i < namelist_len; ++i) {
if (value == namelist[i].value) {
printk("%s", namelist[i].name);
return;
}
}
printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
NV_INFO(dev, "%s - nSource:", id);
nouveau_print_bitfield_names(nsource, nsource_names);
printk(", nStatus:");
if (dev_priv->card_type < NV_10)
nouveau_print_bitfield_names(nstatus, nstatus_names);
else
nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
printk("\n");
if (dev_priv->card_type < NV_50) {
NV_INFO(dev, "%s - nSource:", id);
nouveau_print_bitfield_names(nsource, nsource_names);
printk(", nStatus:");
if (dev_priv->card_type < NV_10)
nouveau_print_bitfield_names(nstatus, nstatus_names);
else
nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
printk("\n");
}
NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
"Data 0x%08x:0x%08x\n",
@ -577,28 +604,503 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
static void
nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t trap[6];
int i, ch;
uint32_t idx = nv_rd32(dev, 0x100c90);
if (idx & 0x80000000) {
idx &= 0xffffff;
if (display) {
for (i = 0; i < 6; i++) {
nv_wr32(dev, 0x100c90, idx | i << 24);
trap[i] = nv_rd32(dev, 0x100c94);
}
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
struct nouveau_channel *chan = dev_priv->fifos[ch];
if (!chan || !chan->ramin)
continue;
if (trap[1] == chan->ramin->instance >> 12)
break;
}
NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
name, (trap[5]&0x100?"read":"write"),
trap[5]&0xff, trap[4]&0xffff,
trap[3]&0xffff, trap[0], trap[2], ch);
}
nv_wr32(dev, 0x100c90, idx | 0x80000000);
} else if (display) {
NV_INFO(dev, "%s - no VM fault?\n", name);
}
}
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
{ 3, "STACK_UNDERFLOW" },
{ 4, "QUADON_ACTIVE" },
{ 8, "TIMEOUT" },
{ 0x10, "INVALID_OPCODE" },
{ 0x40, "BREAKPOINT" },
};
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t units = nv_rd32(dev, 0x1540);
uint32_t addr, mp10, status, pc, oplow, ophigh;
int i;
int mps = 0;
for (i = 0; i < 4; i++) {
if (!(units & 1 << (i+24)))
continue;
if (dev_priv->chipset < 0xa0)
addr = 0x408200 + (tpid << 12) + (i << 7);
else
addr = 0x408100 + (tpid << 11) + (i << 7);
mp10 = nv_rd32(dev, addr + 0x10);
status = nv_rd32(dev, addr + 0x14);
if (!status)
continue;
if (display) {
nv_rd32(dev, addr + 0x20);
pc = nv_rd32(dev, addr + 0x24);
oplow = nv_rd32(dev, addr + 0x70);
ophigh = nv_rd32(dev, addr + 0x74);
NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
"TP %d MP %d: ", tpid, i);
nouveau_print_enum_names(status,
nv50_mp_exec_error_names);
printk(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
}
nv_wr32(dev, addr + 0x10, mp10);
nv_wr32(dev, addr + 0x14, 0);
mps++;
}
if (!mps && display)
NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
"No MPs claiming errors?\n", tpid);
}
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t ustatus_new, int display, const char *name)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int tps = 0;
uint32_t units = nv_rd32(dev, 0x1540);
int i, r;
uint32_t ustatus_addr, ustatus;
for (i = 0; i < 16; i++) {
if (!(units & (1 << i)))
continue;
if (dev_priv->chipset < 0xa0)
ustatus_addr = ustatus_old + (i << 12);
else
ustatus_addr = ustatus_new + (i << 11);
ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
if (!ustatus)
continue;
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
nv50_pfb_vm_trap(dev, display, name);
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
nv_rd32(dev, r));
}
break;
case 7: /* MP error */
if (ustatus & 0x00010000) {
nv50_pgraph_mp_trap(dev, i, display);
ustatus &= ~0x00010000;
}
break;
case 8: /* TPDMA error */
{
uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
nv50_pfb_vm_trap(dev, display, name);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
i, e14, e10);
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000010;
}
/* Render target */
if (ustatus & 0x00000040) {
if (display) {
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
i, e14, e10);
NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000040;
}
/* CUDA memory: l[], g[] or stack. */
if (ustatus & 0x00000080) {
if (display) {
if (e18 & 0x80000000) {
/* g[] read fault? */
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
i, e14, e10 | ((e18 >> 24) & 0x1f));
e18 &= ~0x1f000000;
} else if (e18 & 0xc) {
/* g[] write fault? */
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
i, e14, e10 | ((e18 >> 7) & 0x1f));
e18 &= ~0x00000f80;
} else {
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
i, e14, e10);
}
NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000080;
}
}
break;
}
if (ustatus) {
if (display)
NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
nv_wr32(dev, ustatus_addr, 0xc0000000);
}
if (!tps && display)
NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
struct nouveau_pgraph_trap trap;
uint32_t status = nv_rd32(dev, 0x400108);
uint32_t ustatus;
int display = nouveau_ratelimit();
if (!status && display) {
nouveau_graph_trap_info(dev, &trap);
nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
}
/* DISPATCH: Relays commands to other units and handles NOTIFY,
* COND, QUERY. If you get a trap from it, the command is still stuck
* in DISPATCH and you need to do something about it. */
if (status & 0x001) {
ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
if (!ustatus && display) {
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
}
/* Known to be triggered by screwed up NOTIFY and COND... */
if (ustatus & 0x00000001) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
nv_wr32(dev, 0x400500, 0);
if (nv_rd32(dev, 0x400808) & 0x80000000) {
if (display) {
if (nouveau_graph_trapped_channel(dev, &trap.channel))
trap.channel = -1;
trap.class = nv_rd32(dev, 0x400814);
trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
trap.data = nv_rd32(dev, 0x40080c);
trap.data2 = nv_rd32(dev, 0x400810);
nouveau_graph_dump_trap_info(dev,
"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
}
nv_wr32(dev, 0x400808, 0);
} else if (display) {
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
}
nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
nv_wr32(dev, 0x400848, 0);
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
nv_wr32(dev, 0x400500, 0);
if (nv_rd32(dev, 0x40084c) & 0x80000000) {
if (display) {
if (nouveau_graph_trapped_channel(dev, &trap.channel))
trap.channel = -1;
trap.class = nv_rd32(dev, 0x400814);
trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
trap.data = nv_rd32(dev, 0x40085c);
trap.data2 = 0;
nouveau_graph_dump_trap_info(dev,
"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
}
nv_wr32(dev, 0x40084c, 0);
} else if (display) {
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
}
ustatus &= ~0x00000002;
}
if (ustatus && display)
NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
nv_wr32(dev, 0x400804, 0xc0000000);
nv_wr32(dev, 0x400108, 0x001);
status &= ~0x001;
}
/* TRAPs other than dispatch use the "normal" trap regs. */
if (status && display) {
nouveau_graph_trap_info(dev, &trap);
nouveau_graph_dump_trap_info(dev,
"PGRAPH_TRAP", &trap);
}
/* M2MF: Memory to memory copy engine. */
if (status & 0x002) {
ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
if (!ustatus && display) {
NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
}
if (ustatus & 0x00000001) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
ustatus &= ~0x00000002;
}
if (ustatus & 0x00000004) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
ustatus &= ~0x00000004;
}
NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x406804),
nv_rd32(dev, 0x406808),
nv_rd32(dev, 0x40680c),
nv_rd32(dev, 0x406810));
if (ustatus && display)
NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
/* No sane way found yet -- just reset the bugger. */
nv_wr32(dev, 0x400040, 2);
nv_wr32(dev, 0x400040, 0);
nv_wr32(dev, 0x406800, 0xc0000000);
nv_wr32(dev, 0x400108, 0x002);
status &= ~0x002;
}
/* VFETCH: Fetches data from vertex buffers. */
if (status & 0x004) {
ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
if (!ustatus && display) {
NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
}
if (ustatus & 0x00000001) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x400c00),
nv_rd32(dev, 0x400c08),
nv_rd32(dev, 0x400c0c),
nv_rd32(dev, 0x400c10));
ustatus &= ~0x00000001;
}
if (ustatus && display)
NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
nv_wr32(dev, 0x400c04, 0xc0000000);
nv_wr32(dev, 0x400108, 0x004);
status &= ~0x004;
}
/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
if (status & 0x008) {
ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
if (!ustatus && display) {
NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
}
if (ustatus & 0x00000001) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
nv_rd32(dev, 0x401804),
nv_rd32(dev, 0x401808),
nv_rd32(dev, 0x40180c),
nv_rd32(dev, 0x401810));
ustatus &= ~0x00000001;
}
if (ustatus && display)
NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
/* No sane way found yet -- just reset the bugger. */
nv_wr32(dev, 0x400040, 0x80);
nv_wr32(dev, 0x400040, 0);
nv_wr32(dev, 0x401800, 0xc0000000);
nv_wr32(dev, 0x400108, 0x008);
status &= ~0x008;
}
/* CCACHE: Handles code and c[] caches and fills them. */
if (status & 0x010) {
ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
if (!ustatus && display) {
NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
}
if (ustatus & 0x00000001) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
nv_rd32(dev, 0x405800),
nv_rd32(dev, 0x405804),
nv_rd32(dev, 0x405808),
nv_rd32(dev, 0x40580c),
nv_rd32(dev, 0x405810),
nv_rd32(dev, 0x405814),
nv_rd32(dev, 0x40581c));
ustatus &= ~0x00000001;
}
if (ustatus && display)
NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x400108, 0x010);
status &= ~0x010;
}
/* Unknown, not seen yet... 0x402000 is the only trap status reg
* remaining, so try to handle it anyway. Perhaps related to that
* unknown DMA slot on tesla? */
if (status & 0x20) {
nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
if (display)
NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
nv_wr32(dev, 0x402000, 0xc0000000);
/* no status modification on purpose */
}
/* TEXTURE: CUDA texturing units */
if (status & 0x040) {
nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
"PGRAPH_TRAP_TEXTURE");
nv_wr32(dev, 0x400108, 0x040);
status &= ~0x040;
}
/* MP: CUDA execution engines. */
if (status & 0x080) {
nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
"PGRAPH_TRAP_MP");
nv_wr32(dev, 0x400108, 0x080);
status &= ~0x080;
}
/* TPDMA: Handles TP-initiated uncached memory accesses:
* l[], g[], stack, 2d surfaces, render targets. */
if (status & 0x100) {
nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
"PGRAPH_TRAP_TPDMA");
nv_wr32(dev, 0x400108, 0x100);
status &= ~0x100;
}
if (status) {
if (display)
NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
status);
nv_wr32(dev, 0x400108, status);
}
}
/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
{ 4, "INVALID_VALUE" },
{ 5, "INVALID_ENUM" },
{ 8, "INVALID_OBJECT" },
{ 0xc, "INVALID_BITFIELD" },
{ 0x28, "MP_NO_REG_SPACE" },
{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
struct nouveau_pgraph_trap trap;
int unhandled = 0;
uint32_t status;
while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
/* NOTIFY: You've set a NOTIFY on a command and it's done. */
if (status & 0x00000001) {
nouveau_pgraph_intr_notify(dev, nsource);
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_NOTIFY", &trap);
status &= ~0x00000001;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
}
if (status & 0x00000010) {
nouveau_pgraph_intr_error(dev, nsource |
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
* when you write 0x200 to 0x50c0 method 0x31c. */
if (status & 0x00000002) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_COMPUTE_QUERY", &trap);
status &= ~0x00000002;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
}
/* Unknown, never seen: 0x4 */
/* ILLEGAL_MTHD: You used a wrong method for this class. */
if (status & 0x00000010) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_pgraph_intr_swmthd(dev, &trap))
unhandled = 1;
if (unhandled && nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_ILLEGAL_MTHD", &trap);
status &= ~0x00000010;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
}
/* ILLEGAL_CLASS: You used a wrong class. */
if (status & 0x00000020) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_ILLEGAL_CLASS", &trap);
status &= ~0x00000020;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
}
/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
if (status & 0x00000040) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_DOUBLE_NOTIFY", &trap);
status &= ~0x00000040;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
}
/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
if (status & 0x00001000) {
nv_wr32(dev, 0x400500, 0x00000000);
nv_wr32(dev, NV03_PGRAPH_INTR,
@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
}
if (status & 0x00100000) {
nouveau_pgraph_intr_error(dev, nsource |
NV03_PGRAPH_NSOURCE_DATA_ERROR);
/* BUFFER_NOTIFY: Your m2mf transfer finished */
if (status & 0x00010000) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_BUFFER_NOTIFY", &trap);
status &= ~0x00010000;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
}
/* DATA_ERROR: Invalid value for this method, or invalid
* state in current PGRAPH context for this operation */
if (status & 0x00100000) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit()) {
nouveau_graph_dump_trap_info(dev,
"PGRAPH_DATA_ERROR", &trap);
NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
nouveau_print_enum_names(nv_rd32(dev, 0x400110),
nv50_data_error_names);
printk("\n");
}
status &= ~0x00100000;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
}
/* TRAP: Something bad happened in the middle of command
* execution. Has a billion types, subtypes, and even
* subsubtypes. */
if (status & 0x00200000) {
int r;
nouveau_pgraph_intr_error(dev, nsource |
NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
NV_ERROR(dev, "magic set 1:\n");
for (r = 0x408900; r <= 0x408910; r += 4)
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
nv_rd32(dev, r));
nv_wr32(dev, 0x408900,
nv_rd32(dev, 0x408904) | 0xc0000000);
for (r = 0x408e08; r <= 0x408e24; r += 4)
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
nv_rd32(dev, r));
nv_wr32(dev, 0x408e08,
nv_rd32(dev, 0x408e08) | 0xc0000000);
NV_ERROR(dev, "magic set 2:\n");
for (r = 0x409900; r <= 0x409910; r += 4)
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
nv_rd32(dev, r));
nv_wr32(dev, 0x409900,
nv_rd32(dev, 0x409904) | 0xc0000000);
for (r = 0x409e08; r <= 0x409e24; r += 4)
NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
nv_rd32(dev, r));
nv_wr32(dev, 0x409e08,
nv_rd32(dev, 0x409e08) | 0xc0000000);
nv50_pgraph_trap_handler(dev);
status &= ~0x00200000;
nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
}
/* Unknown, never seen: 0x00400000 */
/* SINGLE_STEP: Happens on every method if you turned on
* single stepping in 40008c */
if (status & 0x01000000) {
nouveau_graph_trap_info(dev, &trap);
if (nouveau_ratelimit())
nouveau_graph_dump_trap_info(dev,
"PGRAPH_SINGLE_STEP", &trap);
status &= ~0x01000000;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
}
/* 0x02000000 happens when you pause a ctxprog...
* but the only way I know this can happen is by
* poking the relevant MMIO register, and we don't
* do that. */
if (status) {
NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
status);
@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
if (nv_rd32(dev, 0x400824) & (1 << 31))
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void

View File

@ -36,7 +36,6 @@
#include "nouveau_drm.h"
#include "nv50_display.h"
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_init_engine_ptrs(struct drm_device *dev)
@ -278,8 +277,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nouveau_stub_init;
engine->fb.takedown = nouveau_stub_takedown;
engine->fb.init = nv50_fb_init;
engine->fb.takedown = nv50_fb_takedown;
engine->graph.grclass = nv50_graph_grclass;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;

View File

@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct drm_framebuffer *fb = crtc->fb;
/* Calculate our timings */
int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
int horizStart = (mode->crtc_hsync_start >> 3) - 1;
int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
int horizStart = (mode->crtc_hsync_start >> 3) + 1;
int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
int horizTotal = (mode->crtc_htotal >> 3) - 5;
int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;

View File

@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
width = ALIGN(image->width, 32);
dsize = (width * image->height) >> 5;
width = ALIGN(image->width, 8);
dsize = ALIGN(width * image->height, 32) >> 5;
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | width);
OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
while (dsize) {

View File

@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev)
}
for (i = 0 ; i < dcb->connector.entries; i++) {
if (i != 0 && dcb->connector.entry[i].index ==
dcb->connector.entry[i - 1].index)
if (i != 0 && dcb->connector.entry[i].index2 ==
dcb->connector.entry[i - 1].index2)
continue;
nouveau_connector_create(dev, &dcb->connector.entry[i]);
}

View File

@ -0,0 +1,32 @@
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
int
nv50_fb_init(struct drm_device *dev)
{
/* This is needed to get meaningful information from 100c90
* on traps. No idea what these values mean exactly. */
struct drm_nouveau_private *dev_priv = dev->dev_private;
switch (dev_priv->chipset) {
case 0x50:
nv_wr32(dev, 0x100c90, 0x0707ff);
break;
case 0xa5:
case 0xa8:
nv_wr32(dev, 0x100c90, 0x0d0fff);
break;
default:
nv_wr32(dev, 0x100c90, 0x1d07ff);
break;
}
return 0;
}
void
nv50_fb_takedown(struct drm_device *dev)
{
}

View File

@ -233,7 +233,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
BEGIN_RING(chan, NvSub2D, 0x0808, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
BEGIN_RING(chan, NvSub2D, 0x081c, 1);
OUT_RING(chan, 1);
BEGIN_RING(chan, NvSub2D, 0x0840, 4);

View File

@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev)
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t units = nv_rd32(dev, 0x1540);
int i;
NV_DEBUG(dev, "\n");
nv_wr32(dev, 0x400804, 0xc0000000);
@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
for (i = 0; i < 16; i++) {
if (units & 1 << i) {
if (dev_priv->chipset < 0xa0) {
nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
} else {
nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
}
}
}
nv_wr32(dev, 0x400108, 0xffffffff);
nv_wr32(dev, 0x400824, 0x00004000);
@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nouveau_grctx_vals_load(dev, ctx);
}
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
if ((dev_priv->chipset & 0xf0) == 0xa0)
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
else
nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
dev_priv->engine.instmem.finish_access(dev);
return 0;

View File

@ -64,6 +64,9 @@
#define CP_FLAG_ALWAYS ((2 * 32) + 13)
#define CP_FLAG_ALWAYS_FALSE 0
#define CP_FLAG_ALWAYS_TRUE 1
#define CP_FLAG_INTR ((2 * 32) + 15)
#define CP_FLAG_INTR_NOT_PENDING 0
#define CP_FLAG_INTR_PENDING 1
#define CP_CTX 0x00100000
#define CP_CTX_COUNT 0x000f0000
@ -214,6 +217,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_setup_save);
cp_set (ctx, UNK1D, SET);
cp_wait(ctx, STATUS, BUSY);
cp_wait(ctx, INTR, PENDING);
cp_bra (ctx, STATUS, BUSY, cp_setup_save);
cp_set (ctx, UNK01, SET);
cp_set (ctx, SWAP_DIRECTION, SAVE);
@ -269,7 +274,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
int offset, base;
uint32_t units = nv_rd32 (ctx->dev, 0x1540);
/* 0800 */
/* 0800: DISPATCH */
cp_ctx(ctx, 0x400808, 7);
gr_def(ctx, 0x400814, 0x00000030);
cp_ctx(ctx, 0x400834, 0x32);
@ -300,7 +305,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400b20, 0x0001629d);
}
/* 0C00 */
/* 0C00: VFETCH */
cp_ctx(ctx, 0x400c08, 0x2);
gr_def(ctx, 0x400c08, 0x0000fe0c);
@ -326,7 +331,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x401540, 0x5);
gr_def(ctx, 0x401550, 0x00001018);
/* 1800 */
/* 1800: STREAMOUT */
cp_ctx(ctx, 0x401814, 0x1);
gr_def(ctx, 0x401814, 0x000000ff);
if (dev_priv->chipset == 0x50) {
@ -641,7 +646,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset == 0x50)
cp_ctx(ctx, 0x4063e0, 0x1);
/* 6800 */
/* 6800: M2MF */
if (dev_priv->chipset < 0x90) {
cp_ctx(ctx, 0x406814, 0x2b);
gr_def(ctx, 0x406818, 0x00000f80);

View File

@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
radeon-y += radeon_device.o radeon_kms.o \
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \

View File

@ -53,15 +53,17 @@
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
int ps_shift;
uint16_t start;
unsigned last_jump;
unsigned long last_jump_jiffies;
bool abort;
} atom_exec_context;
int atom_debug = 0;
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@ -605,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
int idx = U8((*ptr)++);
int r = 0;
if (idx < ATOM_TABLE_NAMES_CNT)
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
if (r) {
ctx->abort = true;
}
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@ -674,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
int execute = 0, target = U16(*ptr);
unsigned long cjiffies;
(*ptr) += 2;
switch (arg) {
case ATOM_COND_ABOVE:
@ -701,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
if (arg != ATOM_COND_ALWAYS)
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
SDEBUG(" target: 0x%04X\n", target);
if (execute)
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
if ((jiffies_to_msecs(cjiffies) > 1000)) {
DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
ctx->abort = true;
}
} else {
/* jiffies wrapped around; just wait a little longer */
ctx->last_jump_jiffies = jiffies;
}
} else {
ctx->last_jump = ctx->start + target;
ctx->last_jump_jiffies = jiffies;
}
*ptr = ctx->start + target;
}
}
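The hunk above makes atom_op_jump() abort once the same backward jump target has been pending for more than a second, using time_after() so a jiffies wrap cannot fake a timeout. A rough userspace analogue of that watchdog pattern, with a monotonic clock standing in for jiffies; every identifier below is illustrative, not driver code:
/*
 * Sketch of the "give up if we keep jumping to the same target for over a
 * second" watchdog. CLOCK_MONOTONIC replaces jiffies/time_after().
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

struct loop_watchdog {
	unsigned int last_target;	/* last jump target we saw */
	double last_ms;			/* when we first saw it */
	bool armed;
};

/* Returns true when the interpreter should abort this jump target. */
static bool watchdog_check(struct loop_watchdog *wd, unsigned int target)
{
	double t = now_ms();

	if (!wd->armed || wd->last_target != target) {
		wd->last_target = target;	/* new target: restart the clock */
		wd->last_ms = t;
		wd->armed = true;
		return false;
	}
	return (t - wd->last_ms) > 1000.0;	/* same target for > 1s: abort */
}

int main(void)
{
	struct loop_watchdog wd = { 0 };
	unsigned int target = 0x1234;

	while (!watchdog_check(&wd, target))
		;	/* spin on the same "jump" until the watchdog trips */

	printf("aborted loop at target 0x%04x after ~1s\n", target);
	return 0;
}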
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@ -1105,7 +1131,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@ -1113,7 +1139,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
atom_exec_context ectx;
if (!base)
return;
return -EINVAL;
len = CU16(base + ATOM_CT_SIZE_PTR);
ws = CU8(base + ATOM_CT_WS_PTR);
@ -1126,6 +1152,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
ectx.abort = false;
ectx.last_jump = 0;
if (ws)
ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
else
@ -1138,6 +1166,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
else
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
if (ectx.abort) {
DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
base, len, ws, ps, ptr - 1);
return -EINVAL;
}
if (op < ATOM_OP_CNT && op > 0)
opcode_table[op].func(&ectx, &ptr,
@ -1153,10 +1186,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
if (ws)
kfree(ectx.ws);
return 0;
}
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
mutex_lock(&ctx->mutex);
/* reset reg block */
ctx->reg_block = 0;
@ -1164,8 +1200,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
atom_execute_table_locked(ctx, index, params);
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
}
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@ -1249,9 +1286,7 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
atom_execute_table(ctx, ATOM_CMD_INIT, ps);
return 0;
return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
}
void atom_destroy(struct atom_context *ctx)
@ -1261,12 +1296,16 @@ void atom_destroy(struct atom_context *ctx)
kfree(ctx);
}
void atom_parse_data_header(struct atom_context *ctx, int index,
bool atom_parse_data_header(struct atom_context *ctx, int index,
uint16_t * size, uint8_t * frev, uint8_t * crev,
uint16_t * data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
if (!mdt[index])
return false;
if (size)
*size = CU16(idx);
@ -1275,38 +1314,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
if (crev)
*crev = CU8(idx + 3);
*data_start = idx;
return;
return true;
}
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
uint8_t * crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
if (!mct[index])
return false;
if (frev)
*frev = CU8(idx + 2);
if (crev)
*crev = CU8(idx + 3);
return;
return true;
}
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
uint16_t data_offset;
int usage_bytes;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
DRM_DEBUG("atom firmware requested %08x %dkb\n",
firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
}
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */

View File

@ -140,11 +140,13 @@ struct atom_context {
extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
void atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
uint8_t *frev, uint8_t *crev, uint16_t *data_start);
bool atom_parse_cmd_header(struct atom_context *ctx, int index,
uint8_t *frev, uint8_t *crev);
int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"

View File

@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_disable_ss(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
u32 ss_cntl;
if (ASIC_IS_DCE4(rdev)) {
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
break;
case ATOM_PPLL2:
ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
break;
case ATOM_PPLL2:
ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
ss_cntl &= ~1;
WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
return;
}
}
}
union atom_enable_ss {
ENABLE_LVDS_SS_PARAMETERS legacy;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
};
static void atombios_set_ss(struct drm_crtc *crtc, int enable)
static void atombios_enable_ss(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
step = dig->ss->step;
delay = dig->ss->delay;
range = dig->ss->range;
} else if (enable)
} else
return;
} else if (enable)
} else
return;
break;
}
@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
args.v1.ucSpreadSpectrumDelay = delay;
args.v1.ucSpreadSpectrumRange = range;
args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
args.v1.ucEnable = enable;
args.v1.ucEnable = ATOM_ENABLE;
} else {
args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
args.legacy.ucSpreadSpectrumType = type;
args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
args.legacy.ucEnable = enable;
args.legacy.ucEnable = ATOM_ENABLE;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@ -478,11 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
/* LVDS PLL quirks */
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
pll->algo = dig->pll_algo;
}
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@ -503,8 +541,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
int index;
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return adjusted_clock;
memset(&args, 0, sizeof(args));
@ -542,11 +581,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
/* may want to enable SS on DP/eDP eventually */
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
if (mode->clock > 165000)
/*args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;*/
if (encoder_mode == ATOM_ENCODER_MODE_DP)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
DISPPLL_CONFIG_COHERENT_MODE;
else {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
}
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@ -592,8 +636,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
memset(&args, 0, sizeof(args));
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return;
switch (frev) {
case 1:
@ -667,8 +712,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
&ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return;
switch (frev) {
case 1:
@ -1083,15 +1129,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
/* TODO color tiling */
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
atombios_set_ss(crtc, 0);
atombios_disable_ss(crtc);
/* always set DCPLL */
if (ASIC_IS_DCE4(rdev))
atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_set_ss(crtc, 1);
atombios_enable_ss(crtc);
if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@ -1120,6 +1163,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

View File

@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
/* disable the training pattern on the source */
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,

View File

@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
@ -437,7 +438,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
int evergreen_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
int chansize, numchan;
@ -482,12 +482,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
radeon_update_bandwidth_info(rdev);
return 0;
}
@ -747,6 +743,7 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
evergreen_suspend(rdev);
#if 0
r600_blit_fini(rdev);

View File

@ -32,6 +32,7 @@
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
@ -236,9 +237,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
void r100_pci_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r100_pci_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
radeon_gart_fini(rdev);
}
int r100_irq_set(struct radeon_device *rdev)
@ -313,10 +314,12 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_FP_DETECT_STAT) {
@ -742,6 +745,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
/* protect against crazy HW on resume */
rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@ -1805,6 +1810,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
u32 tmp;
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
@ -1876,6 +1882,12 @@ void r100_set_common_regs(struct radeon_device *rdev)
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
}
/* switch PM block to ACPI mode */
tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
tmp &= ~RADEON_PM_MODE_SEL;
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
/*
@ -2023,6 +2035,7 @@ void r100_mc_init(struct radeon_device *rdev)
radeon_vram_location(rdev, &rdev->mc, base);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
@ -2386,6 +2399,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
@ -2414,11 +2429,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
* determine is there is enough bw for current mode
*/
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
temp_ff.full = rfixed_const(100);
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
sclk_ff = rdev->pm.sclk;
mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
@ -3441,6 +3453,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);

View File

@ -30,6 +30,7 @@
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r100d.h"
#include "r200_reg_safe.h"

View File

@ -31,6 +31,7 @@
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
@ -165,9 +166,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rv370_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
void r300_fence_ring_emit(struct radeon_device *rdev,
@ -482,6 +483,7 @@ void r300_mc_init(struct radeon_device *rdev)
radeon_vram_location(rdev, &rdev->mc, base);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@ -1335,6 +1337,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);

View File

@ -30,6 +30,7 @@
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
@ -267,6 +268,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);

View File

@ -27,6 +27,7 @@
*/
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev)
void r520_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
r520_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
radeon_update_bandwidth_info(rdev);
}
void r520_mc_program(struct radeon_device *rdev)

View File

@ -32,6 +32,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
@ -492,9 +493,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
radeon_gart_fini(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
@ -676,7 +677,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
u32 tmp;
int chansize, numchan;
@ -720,14 +720,10 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
radeon_update_bandwidth_info(rdev);
return 0;
}
@ -1133,6 +1129,7 @@ void r600_gpu_init(struct radeon_device *rdev)
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@ -2120,6 +2117,7 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
@ -2399,19 +2397,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HPD4_INT_CONTROL, tmp);
if (ASIC_IS_DCE32(rdev)) {
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, 0);
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, 0);
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
@ -2766,6 +2764,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@ -2787,6 +2786,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@ -2835,14 +2835,14 @@ restart_ih:
break;
case 10:
if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
disp_int_cont &= ~DC_HPD5_INTERRUPT;
disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
disp_int_cont &= ~DC_HPD6_INTERRUPT;
disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}

View File

@ -181,41 +181,6 @@ int r600_audio_init(struct radeon_device *rdev)
return 0;
}
/*
* determine how the encoders and the audio interface are wired together
*/
int r600_audio_tmds_index(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *other;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
return 0;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
/* special case check if an TMDS1 is present */
list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
if (to_radeon_encoder(other)->encoder_id ==
ENCODER_OBJECT_ID_INTERNAL_TMDS1)
return 1;
}
return 0;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
return 1;
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return -1;
}
}
/*
* attach the audio codec to the clock source of the encoder
*/
@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
switch (r600_audio_tmds_index(encoder)) {
switch (dig->dig_encoder) {
case 0:
WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL1_DIV, clock*100);
WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 0);
break;
case 1:
WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
WREG32(R600_AUDIO_PLL2_DIV, clock*100);
WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
default:
dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
}

Some files were not shown because too many files have changed in this diff.