
Xtensa patchset for v3.11-rc1

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.12 (GNU/Linux)
 
 iQIcBAABAgAGBQJR3gHDAAoJEI9vqH3mFV2slu4QAIRGqonB3fbRi8OnBJDwrpDY
 4U8MG6uvz1tReBI+BdSXBCxF2CSCINxt2pDzEmQo+1suBfXRDOa5haCUHoLWEMHS
 za2i5bOdsN1U8fio1OFRFn6hdP7L5Xy2YBTZN5ufj9pYA1hkaECdMufWUkM7hklY
 H21zomT+NLiOEAgB5W4JpHZDzjmjwONPebwBiphi0wnWJY0TttK2huZQ/iAK79Fl
 nu0c8cQ2YG++NSnAgTQ0mmXrZNLp4zb40CJwMdOR1tWllYayhuypJdymfwFX7LjJ
 YU5MrAPWNjDAzuGYPnFiR9dli26B7aODpBWTanyWYq1HwKxk3RGQjzty/D1izmWh
 te4qtAhB/sYCgpdMpiMH1bGg6QqkvSJL8raxktFQ1N8ZwuiR3x1Zj6DGI0U3kGLT
 7P194njxIV1+4lCuAfVLTzXiYWV5nrtp2mrT6y1hIzWIolvXqDmJvnJzDTMavihL
 lD2AaLwrkF8rx8kmAFLLA4KqdrmD0IQp79UOyy9KMKhhk9SAD4FqyfuwjsYUrSsN
 vd2QqBxjE8GAc4MYIqqzIqTIvhfDtNk3cFXGn/lR26AW7H5LWT8DKkB0q67WMAJi
 /gvGpfnYED+bD2y0DfkCopGnpNWWXyXf1/mifbZH3jNvaWYAplKpy7eVuvPLgg1E
 7eyVBIVnS2iNYkFAy5jG
 =qScd
 -----END PGP SIGNATURE-----

Merge tag 'xtensa-next-20130710' of git://github.com/czankel/xtensa-linux

Pull Xtensa updates from Chris Zankel.

* tag 'xtensa-next-20130710' of git://github.com/czankel/xtensa-linux: (22 commits)
  xtensa: remove the second argument of __bio_kmap_atomic()
  xtensa: add static function tracer support
  xtensa: Flat DeviceTree copy not future-safe
  xtensa: check TLB sanity on return to userspace
  xtensa: adjust boot parameters address when INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is selected
  xtensa: bootparams: fix typo
  xtensa: tell git to ignore generated .dtb files
  xtensa: ccount based sched_clock
  xtensa: ccount based clockevent implementation
  xtensa: consolidate ccount access routines
  xtensa: cleanup ccount frequency tracking
  xtensa: timex.h: remove unused symbols
  xtensa: tell git to ignore copied zlib source files
  xtensa: fix section mismatch in pcibios_fixup_bus
  xtensa: ISS: fix section mismatch in iss_net_setup
  arch: xtensa: include: asm: compiling issue, need cmpxchg64() defined.
  xtensa: xtfpga: fix section mismatch
  xtensa: remove unused platform_init_irq()
  xtensa: tell git to ignore generated files
  xtensa: flush TLB entries for pages of non-current mm correctly
  ...
Linus Torvalds 2013-07-11 12:30:33 -07:00
commit d4d1cda6ef
29 changed files with 435 additions and 148 deletions


@ -6,10 +6,12 @@ config XTENSA
select ARCH_WANT_FRAME_POINTERS
select HAVE_IDE
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select HAVE_GENERIC_HARDIRQS
select VIRT_TO_BUS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_SCHED_CLOCK
select MODULES_USE_ELF_RELA
select GENERIC_PCI_IOMAP
select ARCH_WANT_IPC_PARSE_VERSION
@ -17,6 +19,7 @@ config XTENSA
select CLONE_BACKWARDS
select IRQ_DOMAIN
select HAVE_OPROFILE
select HAVE_FUNCTION_TRACER
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both


@ -2,6 +2,16 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
config DEBUG_TLB_SANITY
bool "Debug TLB sanity"
depends on DEBUG_KERNEL
help
Enable this to turn on TLB sanity check on each entry to userspace.
This check can spot missing TLB invalidation/wrong PTE permissions/
premature page freeing.
If unsure, say N.
config LD_NO_RELAX
bool "Disable linker relaxation"
default n

arch/xtensa/boot/.gitignore vendored 100644

@ -0,0 +1,3 @@
uImage
zImage.redboot
*.dtb


@ -0,0 +1 @@
boot.lds


@ -0,0 +1,3 @@
inffast.c
inflate.c
inftrees.c


@ -7,6 +7,13 @@ zlib := inffast.c inflate.c inftrees.c
lib-y += $(zlib:.c=.o) zmem.o
ccflags-y := -Ilib/zlib_inflate
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_inflate.o = -pg
CFLAGS_REMOVE_zmem.o = -pg
CFLAGS_REMOVE_inftrees.o = -pg
CFLAGS_REMOVE_inffast.o = -pg
endif
quiet_cmd_copy_zlib = COPY $@
cmd_copy_zlib = cat $< > $@


@ -20,7 +20,7 @@
#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_BAUDRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
#define BP_TAG_FDT 0x1006 /* flat device tree addr */


@ -93,6 +93,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32


@ -12,7 +12,7 @@
#ifndef _XTENSA_DELAY_H
#define _XTENSA_DELAY_H
#include <asm/processor.h>
#include <asm/timex.h>
#include <asm/param.h>
extern unsigned long loops_per_jiffy;
@ -24,24 +24,17 @@ static inline void __delay(unsigned long loops)
: "=r" (loops) : "0" (loops));
}
static __inline__ u32 xtensa_get_ccount(void)
{
u32 ccount;
asm volatile ("rsr %0, ccount\n" : "=r" (ccount));
return ccount;
}
/* For SMP/NUMA systems, change boot_cpu_data to something like
* local_cpu_data->... where local_cpu_data points to the current
* cpu. */
static __inline__ void udelay (unsigned long usecs)
{
unsigned long start = xtensa_get_ccount();
unsigned long start = get_ccount();
unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ));
/* Note: all variables are unsigned (can wrap around)! */
while (((unsigned long)xtensa_get_ccount()) - start < cycles)
while (((unsigned long)get_ccount()) - start < cycles)
;
}
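
A minimal sketch of the cycle budget udelay() computes above, as standalone C. The HZ=100 and ~50 MHz ccount figures are assumed example values, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long hz = 100;                 /* assumed HZ */
	unsigned long loops_per_jiffy = 500000; /* assumed 50 MHz ccount / HZ */
	unsigned long usecs = 20;

	/* same formula as udelay(): ccount ticks per microsecond, times usecs */
	unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / hz));

	printf("%lu us -> %lu ccount cycles\n", usecs, cycles); /* prints 1000 */
	return 0;
}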


@ -13,6 +13,7 @@
#include <asm/processor.h>
#define HAVE_ARCH_CALLER_ADDR
#ifndef __ASSEMBLY__
#define CALLER_ADDR0 ({ unsigned long a0, a1; \
__asm__ __volatile__ ( \
"mov %0, a0\n" \
@ -24,10 +25,22 @@ extern unsigned long return_address(unsigned level);
#define CALLER_ADDR1 return_address(1)
#define CALLER_ADDR2 return_address(2)
#define CALLER_ADDR3 return_address(3)
#else
#else /* CONFIG_FRAME_POINTER */
#define CALLER_ADDR1 (0)
#define CALLER_ADDR2 (0)
#define CALLER_ADDR3 (0)
#endif
#endif /* CONFIG_FRAME_POINTER */
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 3
#ifndef __ASSEMBLY__
extern void _mcount(void);
#define mcount _mcount
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* _XTENSA_FTRACE_H */


@ -5,7 +5,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001 - 2007 Tensilica Inc.
* Copyright (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_PGTABLE_H
@ -64,41 +64,82 @@
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
*/
#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 0xC7FF8000
/*
* Xtensa Linux config PTE layout (when present):
* 31-12: PPN
* 11-6: Software
* 5-4: RING
* 3-0: CA
* For the Xtensa architecture, the PTE layout is as follows:
*
* Similar to the Alpha and MIPS ports, we need to keep track of the ref
* and mod bits in software. We have a software "you can read
* from this page" bit, and a hardware one which actually lets the
* process read from the page. On the same token we have a software
* writable bit and the real hardware one which actually lets the
* process write to the page.
* 31------12 11 10-9 8-6 5-4 3-2 1-0
* +-----------------------------------------+
* | | Software | HARDWARE |
* | PPN | ADW | RI |Attribute|
* +-----------------------------------------+
* pte_none | MBZ | 01 | 11 | 00 |
* +-----------------------------------------+
* present | PPN | 0 | 00 | ADW | RI | CA | wx |
* +- - - - - - - - - - - - - - - - - - - - -+
* (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 11 | 11 |
* +-----------------------------------------+
* swap | index | type | 01 | 11 | 00 |
* +- - - - - - - - - - - - - - - - - - - - -+
* file | file offset | 01 | 11 | 10 |
* +-----------------------------------------+
*
* See further below for PTE layout for swapped-out pages.
* For T1050 hardware and earlier the layout differs for present and (PAGE_NONE)
* +-----------------------------------------+
* present | PPN | 0 | 00 | ADW | RI | CA | w1 |
* +-----------------------------------------+
* (PAGE_NONE)| PPN | 0 | 00 | ADW | 01 | 01 | 00 |
* +-----------------------------------------+
*
* Legend:
* PPN Physical Page Number
* ADW software: accessed (young) / dirty / writable
* RI ring (0=privileged, 1=user, 2 and 3 are unused)
* CA cache attribute: 00 bypass, 01 writeback, 10 writethrough
* (11 is invalid and used to mark pages that are not present)
* w page is writable (hw)
* x page is executable (hw)
* index swap offset / PAGE_SIZE (bit 11-31: 21 bits -> 8 GB)
* (note that the index is always non-zero)
* type swap type (5 bits -> 32 types)
* file offset 26-bit offset into the file, in increments of PAGE_SIZE
*
* Notes:
* - (PROT_NONE) is a special case of 'present' but causes an exception for
* any access (read, write, and execute).
* - 'multihit-exception' has the highest priority of all MMU exceptions,
* so the ring must be set to 'RING_USER' even for 'non-present' pages.
* - on older hardware, the executable flag was not supported and
* used as a 'valid' flag, so it needs to be always set.
* - we need to keep track of certain flags in software (dirty and young)
* to do this, we use write exceptions and have a separate software w-flag.
* - attribute value 1101 (and 1111 on T1050 and earlier) is reserved
*/
#define _PAGE_ATTRIB_MASK 0xf
#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */
#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */
#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */
/* None of these cache modes include MP coherency: */
#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
#define _PAGE_CA_WB (1<<2) /* write-back */
#define _PAGE_CA_WT (2<<2) /* write-through */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_INVALID (3<<2)
#define _PAGE_CA_INVALID (3<<2)
/* We use invalid attribute values to distinguish special pte entries */
#if XCHAL_HW_VERSION_MAJOR < 2000
#define _PAGE_HW_VALID 0x01 /* older HW needed this bit set */
#define _PAGE_NONE 0x04
#else
#define _PAGE_HW_VALID 0x00
#define _PAGE_NONE 0x0f
#endif
#define _PAGE_FILE (1<<1) /* file mapped page, only if !present */
#define _PAGE_USER (1<<4) /* user access (ring=1) */
@ -108,19 +149,12 @@
#define _PAGE_DIRTY (1<<7) /* software: page dirty */
#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
/* On older HW revisions, we always have to set bit 0 */
#if XCHAL_HW_VERSION_MAJOR < 2000
# define _PAGE_VALID (1<<0)
#else
# define _PAGE_VALID 0
#endif
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
#ifdef CONFIG_MMU
#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_HW_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_NONE | _PAGE_USER)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
@ -132,9 +166,9 @@
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_BYPASS)
#else
# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
# define _PAGE_DIRECTORY (_PAGE_HW_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif
#else /* no mmu */
@ -202,12 +236,16 @@ static inline void pgtable_cache_init(void) { }
/*
* pte status.
*/
#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID)
#define pte_present(pte) \
(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \
|| ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
# define pte_none(pte) (pte_val(pte) == (_PAGE_CA_INVALID | _PAGE_USER))
#if XCHAL_HW_VERSION_MAJOR < 2000
# define pte_present(pte) ((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID)
#else
# define pte_present(pte) \
(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_CA_INVALID) \
|| ((pte_val(pte) & _PAGE_ATTRIB_MASK) == _PAGE_NONE))
#endif
#define pte_clear(mm,addr,ptep) \
do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0)
do { update_pte(ptep, __pte(_PAGE_CA_INVALID | _PAGE_USER)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
@ -328,35 +366,23 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
/*
* Encode and decode a swap entry.
*
* Format of swap pte:
* bit 0 MBZ
* bit 1 page-file (must be zero)
* bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
* bits 4 - 5 ring protection (must be 01: _PAGE_USER)
* bits 6 - 10 swap type (5 bits -> 32 types)
* bits 11 - 31 swap offset / PAGE_SIZE (21 bits -> 8GB)
* Format of file pte:
* bit 0 MBZ
* bit 1 page-file (must be one: _PAGE_FILE)
* bits 2 - 3 page hw access mode (must be 11: _PAGE_INVALID)
* bits 4 - 5 ring protection (must be 01: _PAGE_USER)
* bits 6 - 31 file offset / PAGE_SIZE
* Encode and decode a swap and file entry.
*/
#define SWP_TYPE_BITS 5
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
#define __swp_offset(entry) ((entry).val >> 11)
#define __swp_entry(type,offs) \
((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
((swp_entry_t){((type) << 6) | ((offs) << 11) | \
_PAGE_CA_INVALID | _PAGE_USER})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define PTE_FILE_MAX_BITS 28
#define pte_to_pgoff(pte) (pte_val(pte) >> 4)
#define PTE_FILE_MAX_BITS 26
#define pte_to_pgoff(pte) (pte_val(pte) >> 6)
#define pgoff_to_pte(off) \
((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
((pte_t) { ((off) << 6) | _PAGE_CA_INVALID | _PAGE_FILE | _PAGE_USER })
#endif /* !defined (__ASSEMBLY__) */
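
The new swap encoding above keeps the type in bits 6..10 and the offset in bits 11..31, while the invalid cache attribute plus user ring mark the PTE as not present. A minimal userspace sketch of that packing, assuming the bit positions from the layout comment (the helper names and the main() driver are illustrative, not kernel API):

#include <stdio.h>

#define CA_INVALID (3u << 2)  /* attribute 11xx: PTE not present */
#define RING_USER  (1u << 4)  /* ring stays 01 even for swap PTEs */

static unsigned swp_entry(unsigned type, unsigned offset)
{
	/* type: 5 bits at bit 6, offset: 21 bits at bit 11 */
	return (type << 6) | (offset << 11) | CA_INVALID | RING_USER;
}

static unsigned swp_type(unsigned val)   { return (val >> 6) & 0x1f; }
static unsigned swp_offset(unsigned val) { return val >> 11; }

int main(void)
{
	unsigned e = swp_entry(3, 0x12345);

	printf("pte=%08x type=%u offset=%05x\n", e, swp_type(e), swp_offset(e));
	return 0;
}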


@ -29,11 +29,6 @@ extern void platform_init(bp_tag_t*);
*/
extern void platform_setup (char **);
/*
* platform_init_irq is called from init_IRQ.
*/
extern void platform_init_irq (void);
/*
* platform_restart is called to restart the system.
*/


@ -35,19 +35,11 @@
# error "Bad timer number for Linux configurations!"
#endif
#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT)
#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */
#define CLOCK_TICK_FACTOR 20 /* Factor of both 10^6 and CLOCK_TICK_RATE */
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
extern unsigned long ccount_per_jiffy;
extern unsigned long nsec_per_ccount;
#define CCOUNT_PER_JIFFY ccount_per_jiffy
#define NSEC_PER_CCOUNT nsec_per_ccount
extern unsigned long ccount_freq;
#define CCOUNT_PER_JIFFY (ccount_freq / HZ)
#else
#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
#define NSEC_PER_CCOUNT (1000UL / CONFIG_XTENSA_CPU_CLOCK)
#endif

arch/xtensa/kernel/.gitignore vendored 100644

@ -0,0 +1 @@
vmlinux.lds


@ -11,6 +11,7 @@ obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
AFLAGS_head.o += -mtext-section-literals


@ -458,7 +458,7 @@ common_exception_return:
_bbsi.l a4, TIF_NEED_RESCHED, 3f
_bbsi.l a4, TIF_NOTIFY_RESUME, 2f
_bbci.l a4, TIF_SIGPENDING, 4f
_bbci.l a4, TIF_SIGPENDING, 5f
2: l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
@ -476,6 +476,13 @@ common_exception_return:
callx4 a4
j 1b
5:
#ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
movi a4, check_tlb_sanity
callx4 a4
#endif
4: /* Restore optional registers. */
load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
@ -1792,10 +1799,15 @@ ENTRY(fast_store_prohibited)
l32i a0, a0, 0
beqz a0, 2f
/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/
/*
* Note that we test _PAGE_WRITABLE_BIT only if PTE is present
* and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
*/
_PTE_OFFSET(a0, a1, a4)
l32i a4, a0, 0 # read pteval
movi a1, _PAGE_CA_INVALID
ball a4, a1, 2f
bbci.l a4, _PAGE_WRITABLE_BIT, 2f
movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE


@ -68,6 +68,15 @@ _SetupMMU:
#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
initialize_mmu
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
rsr a2, excsave1
movi a3, 0x08000000
bgeu a2, a3, 1f
movi a3, 0xd0000000
add a2, a2, a3
wsr a2, excsave1
1:
#endif
#endif
.end no-absolute-literals
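
A minimal C sketch of the excsave1 fix-up added above: once the MMU is initialized inside vmlinux, a physical boot-parameter pointer (below 0x08000000) is shifted into the kernel's virtual mapping by adding 0xd0000000. The function name is illustrative; the constants come from the assembly in this hunk:

#include <stdint.h>

static uint32_t adjust_bootparam_ptr(uint32_t excsave1)
{
	/* already a virtual address? leave it alone (the bgeu in the asm) */
	if (excsave1 >= 0x08000000u)
		return excsave1;
	return excsave1 + 0xd0000000u;
}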


@ -0,0 +1,50 @@
/*
* arch/xtensa/kernel/mcount.S
*
* Xtensa specific mcount support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Tensilica Inc.
*/
#include <linux/linkage.h>
#include <asm/ftrace.h>
/*
* Entry condition:
*
* a2: a0 of the caller
*/
ENTRY(_mcount)
entry a1, 16
movi a4, ftrace_trace_function
l32i a4, a4, 0
movi a3, ftrace_stub
bne a3, a4, 1f
retw
1: xor a7, a2, a1
movi a3, 0x3fffffff
and a7, a7, a3
xor a7, a7, a1
xor a6, a0, a1
and a6, a6, a3
xor a6, a6, a1
addi a6, a6, -MCOUNT_INSN_SIZE
callx4 a4
retw
ENDPROC(_mcount)
ENTRY(ftrace_stub)
entry a1, 16
retw
ENDPROC(ftrace_stub)
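
On the windowed ABI the top two bits of a return address encode the caller's window increment, so _mcount above rebuilds a plain virtual address by borrowing those bits from the stack pointer (code and stack are assumed to live in the same 1 GB region) before handing the addresses to the tracer. A minimal sketch of that bit juggling; the function name is illustrative:

#include <stdint.h>

static uint32_t fixup_return_address(uint32_t ra, uint32_t sp)
{
	/* low 30 bits from ra, bits 31..30 from sp (the xor/and/xor above) */
	return ((ra ^ sp) & 0x3fffffffu) ^ sp;
}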


@ -77,9 +77,9 @@ pcibios_align_resource(void *data, const struct resource *res,
if (res->flags & IORESOURCE_IO) {
if (size > 0x100) {
printk(KERN_ERR "PCI: I/O Region %s/%d too large"
" (%ld bytes)\n", pci_name(dev),
dev->resource - res, size);
pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
pci_name(dev), dev->resource - res,
size);
}
if (start & 0x300)
@ -174,7 +174,7 @@ static int __init pcibios_init(void)
struct pci_controller *pci_ctrl;
struct list_head resources;
struct pci_bus *bus;
int next_busno = 0, i;
int next_busno = 0;
printk("PCI: Probing PCI hardware\n");
@ -197,7 +197,7 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
void __init pcibios_fixup_bus(struct pci_bus *bus)
void pcibios_fixup_bus(struct pci_bus *bus)
{
if (bus->parent) {
/* This is a subordinate bridge */


@ -29,7 +29,6 @@
*/
_F(void, setup, (char** cmd), { });
_F(void, init_irq, (void), { });
_F(void, restart, (void), { while(1); });
_F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); });
@ -42,6 +41,6 @@ _F(void, pcibios_init, (void), { });
_F(void, calibrate_ccount, (void),
{
pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n");
ccount_per_jiffy = 10 * (1000000UL/HZ);
ccount_freq = 10 * 1000000UL;
});
#endif


@ -152,8 +152,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag)
{
meminfo_t* mi;
mi = (meminfo_t*)(tag->data);
initrd_start = (void*)(mi->start);
initrd_end = (void*)(mi->end);
initrd_start = __va(mi->start);
initrd_end = __va(mi->end);
return 0;
}
@ -164,7 +164,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_initrd);
static int __init parse_tag_fdt(const bp_tag_t *tag)
{
dtb_start = (void *)(tag->data[0]);
dtb_start = __va(tag->data[0]);
return 0;
}
@ -256,7 +256,7 @@ void __init early_init_devtree(void *params)
static void __init copy_devtree(void)
{
void *alloc = early_init_dt_alloc_memory_arch(
be32_to_cpu(initial_boot_params->totalsize), 0);
be32_to_cpu(initial_boot_params->totalsize), 8);
if (alloc) {
memcpy(alloc, initial_boot_params,
be32_to_cpu(initial_boot_params->totalsize));


@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
@ -23,13 +24,13 @@
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <asm/timex.h>
#include <asm/platform.h>
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
unsigned long ccount_per_jiffy; /* per 1/HZ */
unsigned long nsec_per_ccount; /* nsec per ccount increment */
unsigned long ccount_freq; /* ccount Hz */
#endif
static cycle_t ccount_read(struct clocksource *cs)
@ -37,6 +38,11 @@ static cycle_t ccount_read(struct clocksource *cs)
return (cycle_t)get_ccount();
}
static u32 notrace ccount_sched_clock_read(void)
{
return get_ccount();
}
static struct clocksource ccount_clocksource = {
.name = "ccount",
.rating = 200,
@ -44,29 +50,98 @@ static struct clocksource ccount_clocksource = {
.mask = CLOCKSOURCE_MASK(32),
};
static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev);
static void ccount_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt);
static struct ccount_timer_t {
struct clock_event_device evt;
int irq_enabled;
} ccount_timer = {
.evt = {
.name = "ccount_clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 300,
.set_next_event = ccount_timer_set_next_event,
.set_mode = ccount_timer_set_mode,
},
};
static int ccount_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
unsigned long flags, next;
int ret = 0;
local_irq_save(flags);
next = get_ccount() + delta;
set_linux_timer(next);
if (next - get_ccount() > delta)
ret = -ETIME;
local_irq_restore(flags);
return ret;
}
static void ccount_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
struct ccount_timer_t *timer =
container_of(evt, struct ccount_timer_t, evt);
/*
* There is no way to disable the timer interrupt at the device level,
* only at the intenable register itself. Since enable_irq/disable_irq
* calls are nested, we need to make sure that these calls are
* balanced.
*/
switch (mode) {
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
if (timer->irq_enabled) {
disable_irq(evt->irq);
timer->irq_enabled = 0;
}
break;
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_ONESHOT:
if (!timer->irq_enabled) {
enable_irq(evt->irq);
timer->irq_enabled = 1;
}
default:
break;
}
}
static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
.flags = IRQF_TIMER,
.name = "timer",
.dev_id = &ccount_timer,
};
void __init time_init(void)
{
unsigned int irq;
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency ");
platform_calibrate_ccount();
printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
(int)(ccount_per_jiffy/(10000/HZ))%100);
printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
(int)(ccount_freq/10000)%100);
#endif
clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ);
/* Initialize the linux timer interrupt. */
ccount_timer.evt.cpumask = cpumask_of(0);
ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
if (WARN(!ccount_timer.evt.irq, "error: can't map timer irq"))
return;
clockevents_config_and_register(&ccount_timer.evt, ccount_freq, 0xf,
0xffffffff);
setup_irq(ccount_timer.evt.irq, &timer_irqaction);
ccount_timer.irq_enabled = 1;
irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
setup_irq(irq, &timer_irqaction);
set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
setup_sched_clock(ccount_sched_clock_read, 32, ccount_freq);
}
/*
@ -75,36 +150,14 @@ void __init time_init(void)
irqreturn_t timer_interrupt (int irq, void *dev_id)
{
struct ccount_timer_t *timer = dev_id;
struct clock_event_device *evt = &timer->evt;
unsigned long next;
next = get_linux_timer();
again:
while ((signed long)(get_ccount() - next) > 0) {
profile_tick(CPU_PROFILING);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
xtime_update(1); /* Linux handler in kernel/time/timekeeping */
/* Note that writing CCOMPARE clears the interrupt. */
next += CCOUNT_PER_JIFFY;
set_linux_timer(next);
}
evt->event_handler(evt);
/* Allow platform to do something useful (Wdog). */
platform_heartbeat();
/* Make sure we didn't miss any tick... */
if ((signed long)(get_ccount() - next) > 0)
goto again;
return IRQ_HANDLED;
}
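
A minimal sketch of the clockevent programming logic added above: CCOMPARE is armed for "now + delta", and -ETIME is reported when the counter has already run past the target by the time it was written. read_ccount() and write_ccompare() are illustrative stand-ins for get_ccount() and set_linux_timer():

#include <stdint.h>

#define ETIME 62

static uint32_t fake_ccount;                      /* stand-in counter */
static uint32_t read_ccount(void)          { return fake_ccount; }
static void     write_ccompare(uint32_t v) { (void)v; }

static int program_next_event(uint32_t delta)
{
	uint32_t next = read_ccount() + delta;

	write_ccompare(next);
	/* unsigned wrap-around math: more than delta elapsed means we missed */
	if (next - read_ccount() > delta)
		return -ETIME;
	return 0;
}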


@ -124,3 +124,7 @@ extern long common_exception_return;
extern long _spill_registers;
EXPORT_SYMBOL(common_exception_return);
EXPORT_SYMBOL(_spill_registers);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
#endif


@ -64,7 +64,7 @@ void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm) {
unsigned long flags;
local_save_flags(flags);
local_irq_save(flags);
__get_new_mmu_context(mm);
__load_mmu_context(mm);
local_irq_restore(flags);
@ -94,7 +94,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
(unsigned long)mm->context, start, end);
#endif
local_save_flags(flags);
local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
int oldpid = get_rasid_register();
@ -128,9 +128,10 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
if(mm->context == NO_CONTEXT)
return;
local_save_flags(flags);
local_irq_save(flags);
oldpid = get_rasid_register();
set_rasid_register(ASID_INSERT(mm->context));
if (vma->vm_flags & VM_EXEC)
invalidate_itlb_mapping(page);
@ -140,3 +141,116 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
local_irq_restore(flags);
}
#ifdef CONFIG_DEBUG_TLB_SANITY
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
struct task_struct *task = get_current();
struct mm_struct *mm = task->mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
if (!mm)
mm = task->active_mm;
pgd = pgd_offset(mm, vaddr);
if (pgd_none_or_clear_bad(pgd))
return 0;
pmd = pmd_offset(pgd, vaddr);
if (pmd_none_or_clear_bad(pmd))
return 0;
pte = pte_offset_map(pmd, vaddr);
if (!pte)
return 0;
return pte_val(*pte);
}
enum {
TLB_SUSPICIOUS = 1,
TLB_INSANE = 2,
};
static void tlb_insane(void)
{
BUG_ON(1);
}
static void tlb_suspicious(void)
{
WARN_ON(1);
}
/*
* Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
* and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
*
* Check that valid TLB entries either have the same PA as the PTE, or PTE is
* marked as non-present. Non-present PTE and the page with non-zero refcount
* and zero mapcount is normal for batched TLB flush operation. Zero refcount
* means that the page was freed prematurely. Non-zero mapcount is unusual,
* but does not necessarily mean an error, thus marked as suspicious.
*/
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
unsigned tlbidx = w | (e << PAGE_SHIFT);
unsigned r0 = dtlb ?
read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
unsigned pte = get_pte_for_vaddr(vpn);
unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
unsigned tlb_asid = r0 & ASID_MASK;
bool kernel = tlb_asid == 1;
int rc = 0;
if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
dtlb ? 'D' : 'I', w, e, vpn,
kernel ? "kernel" : "user");
rc |= TLB_INSANE;
}
if (tlb_asid == mm_asid) {
unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
read_itlb_translation(tlbidx);
if ((pte ^ r1) & PAGE_MASK) {
pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
dtlb ? 'D' : 'I', w, e, r0, r1, pte);
if (pte == 0 || !pte_present(__pte(pte))) {
struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
pr_err("page refcount: %d, mapcount: %d\n",
page_count(p),
page_mapcount(p));
if (!page_count(p))
rc |= TLB_INSANE;
else if (page_mapped(p))
rc |= TLB_SUSPICIOUS;
} else {
rc |= TLB_INSANE;
}
}
}
return rc;
}
void check_tlb_sanity(void)
{
unsigned long flags;
unsigned w, e;
int bug = 0;
local_irq_save(flags);
for (w = 0; w < DTLB_ARF_WAYS; ++w)
for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
bug |= check_tlb_entry(w, e, true);
for (w = 0; w < ITLB_ARF_WAYS; ++w)
for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
bug |= check_tlb_entry(w, e, false);
if (bug & TLB_INSANE)
tlb_insane();
if (bug & TLB_SUSPICIOUS)
tlb_suspicious();
local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_TLB_SANITY */
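
The first rule check_tlb_entry() enforces above: a TLB entry tagged with the kernel ASID (1) must map a kernel address (>= TASK_SIZE), and entries with user ASIDs must map user addresses. A minimal sketch of just that rule, with TASK_SIZE as an assumed example value:

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_TASK_SIZE 0x40000000u  /* assumed user/kernel split */

static bool tlb_entry_sane(uint32_t vpn, uint32_t tlb_asid)
{
	bool kernel = (tlb_asid == 1);

	if (tlb_asid == 0)  /* unused entry: nothing to check */
		return true;
	return (vpn < EXAMPLE_TASK_SIZE) != kernel;
}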


@ -700,7 +700,7 @@ struct iss_net_init {
#define ERR KERN_ERR "iss_net_setup: "
static int iss_net_setup(char *str)
static int __init iss_net_setup(char *str)
{
struct iss_net_private *device = NULL;
struct iss_net_init *new;


@ -108,13 +108,13 @@ static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
sector_t sector = bio->bi_sector;
bio_for_each_segment(bvec, bio, i) {
char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
char *buffer = __bio_kmap_atomic(bio, i);
unsigned len = bvec->bv_len >> SECTOR_SHIFT;
simdisk_transfer(dev, sector, len, buffer,
bio_data_dir(bio) == WRITE);
sector += len;
__bio_kunmap_atomic(bio, KM_USER0);
__bio_kunmap_atomic(bio);
}
return 0;
}


@ -163,7 +163,7 @@ void platform_heartbeat(void)
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
void platform_calibrate_ccount(void)
void __init platform_calibrate_ccount(void)
{
long clk_freq = 0;
#ifdef CONFIG_OF
@ -179,8 +179,7 @@ void platform_calibrate_ccount(void)
if (!clk_freq)
clk_freq = *(long *)XTFPGA_CLKFRQ_VADDR;
ccount_per_jiffy = clk_freq / HZ;
nsec_per_ccount = 1000000000UL / clk_freq;
ccount_freq = clk_freq;
}
#endif


@ -1,4 +1,3 @@
#include <asm/delay.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <variant/hardware.h>
@ -17,11 +16,10 @@ void platform_calibrate_ccount(void)
"1: l32i %0, %2, 0 ;"
" beq %0, %1, 1b ;"
: "=&a"(u) : "a"(t), "a"(tstamp));
b = xtensa_get_ccount();
b = get_ccount();
if (i == LOOPS)
a = b;
} while (--i >= 0);
b -= a;
nsec_per_ccount = (LOOPS * 10000) / b;
ccount_per_jiffy = b * (100000UL / (LOOPS * HZ));
ccount_freq = b * (100000UL / LOOPS);
}


@ -97,11 +97,11 @@ static inline void *bio_data(struct bio *bio)
* permanent PIO fall back, user is probably better off disabling highmem
* I/O completely on that queue (see ide-dma for example)
*/
#define __bio_kmap_atomic(bio, idx, kmtype) \
#define __bio_kmap_atomic(bio, idx) \
(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \
bio_iovec_idx((bio), (idx))->bv_offset)
#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
/*
* merge helpers etc