Merge branch 'sh/pmb-dynamic'

Paul Mundt 2010-02-18 18:35:20 +09:00
commit 77f36fcc03
20 changed files with 600 additions and 311 deletions


@ -117,7 +117,7 @@ void decompress_kernel(void)
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_LEGACY)
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
#endif
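
With the legacy PMB case gone, only genuine 29-bit kernels still redirect the decompressor's output address through the uncached P2 alias. A quick worked example of the arithmetic, using invented numbers (the physical load address is board specific); the only hard constant assumed here is P2SEG = 0xa0000000:

/* Illustrative values only: suppose __pa(&_text + PAGE_SIZE) == 0x0c001000. */
static unsigned long example_output_addr(void)
{
	unsigned long output_addr = 0x0c001000;

#ifdef CONFIG_29BIT
	output_addr |= P2SEG;	/* 0x0c001000 | 0xa0000000 = 0xac001000, the uncached P2 alias */
#endif
	/* With CONFIG_PMB the physical address is used as-is. */
	return output_addr;
}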


@ -133,6 +133,28 @@ static inline void ctrl_delay(void)
__raw_readw(generic_io_base);
}
#define __BUILD_UNCACHED_IO(bwlq, type) \
static inline type read##bwlq##_uncached(unsigned long addr) \
{ \
type ret; \
jump_to_uncached(); \
ret = __raw_read##bwlq(addr); \
back_to_cached(); \
return ret; \
} \
\
static inline void write##bwlq##_uncached(type v, unsigned long addr) \
{ \
jump_to_uncached(); \
__raw_write##bwlq(v, addr); \
back_to_cached(); \
}
__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
#define __BUILD_MEMORY_STRING(bwlq, type) \
\
static inline void __raw_writes##bwlq(volatile void __iomem *mem, \
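
The __BUILD_UNCACHED_IO() instantiations above generate read{b,w,l,q}_uncached() and write{b,w,l,q}_uncached() helpers that wrap a single __raw access in a jump_to_uncached()/back_to_cached() pair; pmb.c below leans on writel_uncached() to rewrite live PMB registers. A minimal, purely illustrative caller (the register address and function name are invented):

#include <asm/io.h>

static void example_poke(void)
{
	unsigned long reg = 0xabcd0000;		/* hypothetical MMIO register */

	writel_uncached(0x1, reg);		/* store through the uncached window */
	(void)readl_uncached(reg);		/* read back, also uncached */
}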


@ -11,7 +11,9 @@
#define PMB_ADDR 0xf6100000
#define PMB_DATA 0xf7100000
#define PMB_ENTRY_MAX 16
#define NR_PMB_ENTRIES 16
#define PMB_E_MASK 0x0000000f
#define PMB_E_SHIFT 8
@ -25,6 +27,7 @@
#define PMB_C 0x00000008
#define PMB_WT 0x00000001
#define PMB_UB 0x00000200
#define PMB_CACHE_MASK (PMB_C | PMB_WT | PMB_UB)
#define PMB_V 0x00000100
#define PMB_NO_ENTRY (-1)
@ -32,6 +35,7 @@
#ifndef __ASSEMBLY__
#include <linux/errno.h>
#include <linux/threads.h>
#include <asm/page.h>
/* Default "unsigned long" context */
typedef unsigned long mm_context_id_t[NR_CPUS];
@ -49,46 +53,22 @@ typedef struct {
#endif
} mm_context_t;
struct pmb_entry;
struct pmb_entry {
unsigned long vpn;
unsigned long ppn;
unsigned long flags;
/*
* 0 .. NR_PMB_ENTRIES for specific entry selection, or
* PMB_NO_ENTRY to search for a free one
*/
int entry;
struct pmb_entry *next;
/* Adjacent entry link for contiguous multi-entry mappings */
struct pmb_entry *link;
};
#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags);
unsigned long size, pgprot_t prot);
void pmb_unmap(unsigned long addr);
int pmb_init(void);
void pmb_init(void);
bool __in_29bit_mode(void);
#else
static inline long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags)
unsigned long size, pgprot_t prot)
{
return -EINVAL;
}
static inline void pmb_unmap(unsigned long addr)
{
}
static inline int pmb_init(void)
{
return -ENODEV;
}
#define pmb_unmap(addr) do { } while (0)
#define pmb_init(addr) do { } while (0)
#ifdef CONFIG_29BIT
#define __in_29bit_mode() (1)
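
pmb_remap() now takes a pgprot_t and pmb_init() no longer returns a value; __ioremap_caller() further down is the in-tree user. A hedged sketch of a caller under the new interface (the physical address, size, and the choice of P3SEG as the virtual window are invented for illustration):

#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/addrspace.h>
#include <asm/sizes.h>

static unsigned long example_map(void)
{
	unsigned long virt = P3SEG;		/* hypothetical virtual window */
	long mapped;

	mapped = pmb_remap(virt, 0x18000000UL, SZ_64M, PAGE_KERNEL_NOCACHE);
	if (mapped <= 0)
		return 0;			/* no PMB coverage; fall back to page tables */

	return virt;				/* released later with pmb_unmap(virt) */
}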


@ -45,6 +45,7 @@
#endif
#ifndef __ASSEMBLY__
#include <asm/uncached.h>
extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
@ -56,7 +57,6 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)
return (addr1 ^ addr2) & shm_align_mask;
}
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
@ -127,12 +127,7 @@ typedef struct page *pgtable_t;
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
#if defined(CONFIG_PMB_LEGACY)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
#elif defined(CONFIG_32BIT)
#ifdef CONFIG_PMB
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
@ -140,6 +135,14 @@ typedef struct page *pgtable_t;
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
#ifdef CONFIG_UNCACHED_MAPPING
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
#else
#define UNCAC_ADDR(addr) ((addr))
#define CAC_ADDR(addr) ((addr))
#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
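
Under CONFIG_PMB the kernel is linked at PAGE_OFFSET but lives physically at __MEMORY_START, so __pa()/__va() translate by that offset, while UNCAC_ADDR()/CAC_ADDR() shuttle addresses in and out of the new uncached window. A worked example with invented but typical values (PAGE_OFFSET = 0x80000000, __MEMORY_START = 0x08000000; both are board/config dependent):

static void example_translate(void)
{
	void *kaddr = (void *)0x80400000UL;	/* some cached kernel address */

	/* 0x80400000 - PAGE_OFFSET + __MEMORY_START == 0x08400000 */
	unsigned long phys = __pa(kaddr);

	/* Same 0x00400000 offset, rebased onto the uncached window. */
	unsigned long ucached = UNCAC_ADDR((unsigned long)kaddr);

	(void)phys;
	(void)ucached;
}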


@ -139,15 +139,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
#ifdef CONFIG_UNCACHED_MAPPING
/*
* If PC points in to the uncached mapping, fix it up and hand
* back the cached equivalent.
*/
if ((pc >= (memory_start + cached_to_uncached)) &&
(pc < (memory_start + cached_to_uncached + uncached_size)))
pc -= cached_to_uncached;
#endif
if (virt_addr_uncached(pc))
return CAC_ADDR(pc);
return pc;
}


@ -0,0 +1,18 @@
#ifndef __ASM_SH_UNCACHED_H
#define __ASM_SH_UNCACHED_H
#include <linux/bug.h>
#ifdef CONFIG_UNCACHED_MAPPING
extern unsigned long uncached_start, uncached_end;
extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
extern void uncached_resize(unsigned long size);
#else
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#define uncached_resize(size) BUG()
#endif
#endif /* __ASM_SH_UNCACHED_H */


@ -12,6 +12,7 @@
#define __ASM_CPU_SH4_SQ_H
#include <asm/addrspace.h>
#include <asm/page.h>
/*
* Store queues range from e0000000-e3fffffc, allowing approx. 64MB to be
@ -28,7 +29,7 @@
/* arch/sh/kernel/cpu/sh4/sq.c */
unsigned long sq_remap(unsigned long phys, unsigned int size,
const char *name, unsigned long flags);
const char *name, pgprot_t prot);
void sq_unmap(unsigned long vaddr);
void sq_flush_range(unsigned long start, unsigned int len);
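
sq_remap() likewise switches from raw protection flags to a pgprot_t; the sq.c and pvr2fb hunks below show the in-tree conversions. A hedged sketch of a caller (the physical address and size are invented):

#include <linux/err.h>
#include <cpu/sq.h>
#include <asm/sizes.h>

static void example_sq_burst(void)
{
	unsigned long sq_addr;

	sq_addr = sq_remap(0x10000000, SZ_64K, "example", PAGE_SHARED);
	if (IS_ERR_VALUE(sq_addr))
		return;

	/* ... burst writes through the store queue window ... */

	sq_unmap(sq_addr);
}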


@ -24,6 +24,7 @@
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>
#ifdef CONFIG_SH_FPU
#define cpu_has_fpu 1
@ -342,9 +343,21 @@ asmlinkage void __init sh_cpu_init(void)
speculative_execution_init();
expmask_init();
/*
* Boot processor to setup the FP and extended state context info.
*/
if (raw_smp_processor_id() == 0)
/* Do the rest of the boot processor setup */
if (raw_smp_processor_id() == 0) {
/* Save off the BIOS VBR, if there is one */
sh_bios_vbr_init();
/*
* Setup VBR for boot CPU. Secondary CPUs do this through
* start_secondary().
*/
per_cpu_trap_init();
/*
* Boot processor to setup the FP and extended state
* context info.
*/
init_thread_xstate();
}
}


@ -100,7 +100,7 @@ static inline void sq_mapping_list_del(struct sq_mapping *map)
spin_unlock_irq(&sq_mapping_lock);
}
static int __sq_remap(struct sq_mapping *map, unsigned long flags)
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
struct vm_struct *vma;
@ -113,7 +113,7 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
if (ioremap_page_range((unsigned long)vma->addr,
(unsigned long)vma->addr + map->size,
vma->phys_addr, __pgprot(flags))) {
vma->phys_addr, prot)) {
vunmap(vma->addr);
return -EAGAIN;
}
@ -135,14 +135,14 @@ static int __sq_remap(struct sq_mapping *map, unsigned long flags)
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
* @flags: Protection flags.
* @prot: Protection bits.
*
* Remaps the physical address @phys through the next available store queue
* address of @size length. @name is logged at boot time as well as through
* the sysfs interface.
*/
unsigned long sq_remap(unsigned long phys, unsigned int size,
const char *name, unsigned long flags)
const char *name, pgprot_t prot)
{
struct sq_mapping *map;
unsigned long end;
@ -177,7 +177,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size,
map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
ret = __sq_remap(map, pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
ret = __sq_remap(map, prot);
if (unlikely(ret != 0))
goto out;
@ -309,8 +309,7 @@ static ssize_t mapping_store(const char *buf, size_t count)
return -EIO;
if (likely(len)) {
int ret = sq_remap(base, len, "Userspace",
pgprot_val(PAGE_SHARED));
int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
if (ret < 0)
return ret;
} else


@ -85,7 +85,7 @@ ENTRY(_stext)
ldc r0, r7_bank ! ... and initial thread_info
#endif
#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
#ifdef CONFIG_PMB
/*
* Reconfigure the initial PMB mappings setup by the hardware.
*
@ -139,7 +139,6 @@ ENTRY(_stext)
mov.l r0, @r1
mov.l .LMEMORY_SIZE, r5
mov r5, r7
mov #PMB_E_SHIFT, r0
mov #0x1, r4
@ -150,8 +149,43 @@ ENTRY(_stext)
mov.l .LFIRST_ADDR_ENTRY, r2
mov.l .LPMB_ADDR, r3
/*
* First we need to walk the PMB and figure out if there are any
* existing mappings that match the initial mappings VPN/PPN.
* If these have already been established by the bootloader, we
* don't bother setting up new entries here, and let the late PMB
* initialization take care of things instead.
*
* Note that we may need to coalesce and merge entries in order
* to reclaim more available PMB slots, which is much more complex
* than we want to do at this early stage.
*/
mov #0, r10
mov #NR_PMB_ENTRIES, r9
mov r1, r7 /* temporary PMB_DATA iter */
.Lvalidate_existing_mappings:
mov.l @r7, r8
and r0, r8
cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */
bt .Lpmb_done
add #1, r10 /* Increment the loop counter */
cmp/eq r9, r10
bf/s .Lvalidate_existing_mappings
add r4, r7 /* Increment to the next PMB_DATA entry */
/*
* If we've fallen through, continue with setting up the initial
* mappings.
*/
mov r5, r7 /* cached_to_uncached */
mov #0, r10
#ifdef CONFIG_UNCACHED_MAPPING
/*
* Uncached mapping
*/
@ -171,6 +205,7 @@ ENTRY(_stext)
add r4, r1
add r4, r3
add #1, r10
#endif
/*
* Iterate over all of the available sizes from largest to
@ -216,6 +251,7 @@ ENTRY(_stext)
__PMB_ITER_BY_SIZE(64)
__PMB_ITER_BY_SIZE(16)
#ifdef CONFIG_UNCACHED_MAPPING
/*
* Now that we can access it, update cached_to_uncached and
* uncached_size.
@ -228,6 +264,7 @@ ENTRY(_stext)
shll16 r7
shll8 r7
mov.l r7, @r0
#endif
/*
* Clear the remaining PMB entries.
@ -236,7 +273,7 @@ ENTRY(_stext)
* r10 = number of entries we've setup so far
*/
mov #0, r1
mov #PMB_ENTRY_MAX, r0
mov #NR_PMB_ENTRIES, r0
.Lagain:
mov.l r1, @r3 /* Clear PMB_ADDR entry */
@ -248,7 +285,8 @@ ENTRY(_stext)
mov.l 6f, r0
icbi @r0
#endif /* !CONFIG_PMB_LEGACY */
.Lpmb_done:
#endif /* CONFIG_PMB */
#ifndef CONFIG_SH_NO_BSS_INIT
/*
@ -300,13 +338,15 @@ ENTRY(stack_start)
6: .long sh_cpu_init
7: .long init_thread_union
#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
#ifdef CONFIG_PMB
.LPMB_ADDR: .long PMB_ADDR
.LPMB_DATA: .long PMB_DATA
.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
.LMMUCR: .long MMUCR
.LMEMORY_SIZE: .long __MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached: .long cached_to_uncached
.Luncached_size: .long uncached_size
.LMEMORY_SIZE: .long __MEMORY_SIZE
#endif
#endif
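
For readability, here is a rough C rendering of the new .Lvalidate_existing_mappings walk above. It is illustrative only (the real code runs as early assembly before a C environment exists, and the helper name is invented); per the literal pool, r0 holds __MEMORY_START | PMB_V and the loop strides PMB_DATA by 1 << PMB_E_SHIFT:

static int __init pmb_boot_mappings_present(void)
{
	unsigned long key = __MEMORY_START | PMB_V;	/* .LFIRST_DATA_ENTRY */
	int i;

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long data = __raw_readl(PMB_DATA + (i << PMB_E_SHIFT));

		/*
		 * A valid entry already covering __MEMORY_START means the
		 * boot loader set the PMB up for us: branch to .Lpmb_done
		 * and let the late (C) PMB code resynchronize instead.
		 */
		if ((data & key) == key)
			return 1;
	}

	return 0;	/* fall through and program the initial mappings */
}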


@ -421,6 +421,8 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
uncached_init();
plat_early_device_setup();
/* Let earlyprintk output early console messages */


@ -30,7 +30,6 @@
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/sh_bios.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
@ -848,12 +847,6 @@ void __init trap_init(void)
#ifdef TRAP_UBC
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
/* Save off the BIOS VBR, if there is one */
sh_bios_vbr_init();
/* Setup VBR for boot cpu */
per_cpu_trap_init();
}
void show_stack(struct task_struct *tsk, unsigned long *sp)


@ -14,11 +14,10 @@ OUTPUT_ARCH(sh)
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
#if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \
defined(CONFIG_PMB_LEGACY)
#define MEMORY_OFFSET __MEMORY_START
#ifdef CONFIG_PMB
#define MEMORY_OFFSET 0
#else
#define MEMORY_OFFSET 0
#define MEMORY_OFFSET __MEMORY_START
#endif
ENTRY(_start)


@ -91,16 +91,6 @@ config PMB
32-bits through the SH-4A PMB. If this is not set, legacy
29-bit physical addressing will be used.
config PMB_LEGACY
bool "Support legacy boot mappings for PMB"
depends on PMB
select 32BIT
help
If this option is enabled, fixed PMB mappings are inherited
from the boot loader, and the kernel does not attempt dynamic
management. This is the closest to legacy 29-bit physical mode,
and allows systems to support up to 512MiB of system memory.
config X2TLB
def_bool y
depends on (CPU_SHX2 || CPU_SHX3) && MMU


@ -36,6 +36,7 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
# Special flags for fault_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.


@ -26,21 +26,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
#ifdef CONFIG_UNCACHED_MAPPING
/*
* This is the offset of the uncached section from its cached alias.
*
* Legacy platforms handle trivial transitions between cached and
* uncached segments by making use of the 1:1 mapping relationship in
* 512MB lowmem, others via a special uncached mapping.
*
* Default value only valid in 29 bit mode, in 32bit mode this will be
* updated by the early PMB initialization code.
*/
unsigned long cached_to_uncached = 0x20000000;
unsigned long uncached_size = SZ_512M;
#endif
#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
@ -260,7 +245,6 @@ void __init mem_init(void)
memset(empty_zero_page, 0, PAGE_SIZE);
__flush_wback_region(empty_zero_page, PAGE_SIZE);
/* Initialize the vDSO */
vsyscall_init();
codesize = (unsigned long) &_etext - (unsigned long) &_text;
@ -303,9 +287,7 @@ void __init mem_init(void)
((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
#ifdef CONFIG_UNCACHED_MAPPING
(unsigned long)memory_start + cached_to_uncached,
(unsigned long)memory_start + cached_to_uncached + uncached_size,
uncached_size >> 20,
uncached_start, uncached_end, uncached_size >> 20,
#endif
(unsigned long)&__init_begin, (unsigned long)&__init_end,


@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
if (unlikely(phys_addr >= P1SEG)) {
unsigned long mapped;
mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
mapped = pmb_remap(addr, phys_addr, size, pgprot);
if (likely(mapped)) {
addr += mapped;
phys_addr += mapped;


@ -21,47 +21,67 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#define NR_PMB_ENTRIES 16
struct pmb_entry;
static void __pmb_unmap(struct pmb_entry *);
struct pmb_entry {
unsigned long vpn;
unsigned long ppn;
unsigned long flags;
unsigned long size;
spinlock_t lock;
/*
* 0 .. NR_PMB_ENTRIES for specific entry selection, or
* PMB_NO_ENTRY to search for a free one
*/
int entry;
/* Adjacent entry link for contiguous multi-entry mappings */
struct pmb_entry *link;
};
static void pmb_unmap_entry(struct pmb_entry *, int depth);
static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
static inline unsigned long mk_pmb_entry(unsigned int entry)
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}
static inline unsigned long mk_pmb_addr(unsigned int entry)
static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
return mk_pmb_entry(entry) | PMB_ADDR;
}
static inline unsigned long mk_pmb_data(unsigned int entry)
static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
return mk_pmb_entry(entry) | PMB_DATA;
}
static int pmb_alloc_entry(void)
{
unsigned int pos;
int pos;
repeat:
pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
if (unlikely(pos > NR_PMB_ENTRIES))
return -ENOSPC;
if (test_and_set_bit(pos, &pmb_map))
goto repeat;
pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
if (pos >= 0 && pos < NR_PMB_ENTRIES)
__set_bit(pos, pmb_map);
else
pos = -ENOSPC;
return pos;
}
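
/*
 * Note: pmb_map is now a real bitmap, and pmb_alloc_entry() runs with
 * pmb_rwlock held for writing (see pmb_alloc() below), which is why the
 * non-atomic __set_bit() is safe. The return value is either a free slot
 * index in [0, NR_PMB_ENTRIES) or -ENOSPC.
 */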
@ -70,21 +90,34 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
unsigned long flags, int entry)
{
struct pmb_entry *pmbe;
unsigned long irqflags;
void *ret = NULL;
int pos;
write_lock_irqsave(&pmb_rwlock, irqflags);
if (entry == PMB_NO_ENTRY) {
pos = pmb_alloc_entry();
if (pos < 0)
return ERR_PTR(pos);
if (unlikely(pos < 0)) {
ret = ERR_PTR(pos);
goto out;
}
} else {
if (test_bit(entry, &pmb_map))
return ERR_PTR(-ENOSPC);
if (__test_and_set_bit(entry, pmb_map)) {
ret = ERR_PTR(-ENOSPC);
goto out;
}
pos = entry;
}
write_unlock_irqrestore(&pmb_rwlock, irqflags);
pmbe = &pmb_entry_list[pos];
if (!pmbe)
return ERR_PTR(-ENOMEM);
memset(pmbe, 0, sizeof(struct pmb_entry));
spin_lock_init(&pmbe->lock);
pmbe->vpn = vpn;
pmbe->ppn = ppn;
@ -92,101 +125,113 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
pmbe->entry = pos;
return pmbe;
out:
write_unlock_irqrestore(&pmb_rwlock, irqflags);
return ret;
}
static void pmb_free(struct pmb_entry *pmbe)
{
int pos = pmbe->entry;
__clear_bit(pmbe->entry, pmb_map);
pmbe->vpn = 0;
pmbe->ppn = 0;
pmbe->flags = 0;
pmbe->entry = 0;
clear_bit(pos, &pmb_map);
pmbe->entry = PMB_NO_ENTRY;
pmbe->link = NULL;
}
/*
* Must be in P2 for __set_pmb_entry()
* Ensure that the PMB entries match our cache configuration.
*
* When we are in 32-bit address extended mode, CCR.CB becomes
* invalid, so care must be taken to manually adjust cacheable
* translations.
*/
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
unsigned long flags, int pos)
static __always_inline unsigned long pmb_cache_flags(void)
{
__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));
unsigned long flags = 0;
#ifdef CONFIG_CACHE_WRITETHROUGH
/*
* When we are in 32-bit address extended mode, CCR.CB becomes
* invalid, so care must be taken to manually adjust cacheable
* translations.
*/
if (likely(flags & PMB_C))
flags |= PMB_WT;
#if defined(CONFIG_CACHE_WRITETHROUGH)
flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
flags |= PMB_C;
#endif
__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
return flags;
}
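
/*
 * Net effect: pmb_cache_flags() evaluates to PMB_C | PMB_WT | PMB_UB for
 * CONFIG_CACHE_WRITETHROUGH builds, to PMB_C for CONFIG_CACHE_WRITEBACK,
 * and to 0 when caching is disabled; pmb_synchronize() uses it below to
 * overwrite whatever attributes the boot loader programmed.
 */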
/*
* Must be run uncached.
*/
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
mk_pmb_data(pmbe->entry));
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
unsigned long addr, data;
unsigned long addr_val, data_val;
addr = mk_pmb_addr(pmbe->entry);
data = mk_pmb_data(pmbe->entry);
addr_val = __raw_readl(addr);
data_val = __raw_readl(data);
/* Clear V-bit */
writel_uncached(addr_val & ~PMB_V, addr);
writel_uncached(data_val & ~PMB_V, data);
}
static void set_pmb_entry(struct pmb_entry *pmbe)
{
jump_to_uncached();
__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
back_to_cached();
unsigned long flags;
spin_lock_irqsave(&pmbe->lock, flags);
__set_pmb_entry(pmbe);
spin_unlock_irqrestore(&pmbe->lock, flags);
}
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
unsigned int entry = pmbe->entry;
unsigned long addr;
if (unlikely(entry >= NR_PMB_ENTRIES))
return;
jump_to_uncached();
/* Clear V-bit */
addr = mk_pmb_addr(entry);
__raw_writel(__raw_readl(addr) & ~PMB_V, addr);
addr = mk_pmb_data(entry);
__raw_writel(__raw_readl(addr) & ~PMB_V, addr);
back_to_cached();
}
static struct {
unsigned long size;
int flag;
} pmb_sizes[] = {
{ .size = 0x20000000, .flag = PMB_SZ_512M, },
{ .size = 0x08000000, .flag = PMB_SZ_128M, },
{ .size = 0x04000000, .flag = PMB_SZ_64M, },
{ .size = 0x01000000, .flag = PMB_SZ_16M, },
{ .size = SZ_512M, .flag = PMB_SZ_512M, },
{ .size = SZ_128M, .flag = PMB_SZ_128M, },
{ .size = SZ_64M, .flag = PMB_SZ_64M, },
{ .size = SZ_16M, .flag = PMB_SZ_16M, },
};
long pmb_remap(unsigned long vaddr, unsigned long phys,
unsigned long size, unsigned long flags)
unsigned long size, pgprot_t prot)
{
struct pmb_entry *pmbp, *pmbe;
unsigned long wanted;
int pmb_flags, i;
long err;
u64 flags;
flags = pgprot_val(prot);
pmb_flags = PMB_WT | PMB_UB;
/* Convert typical pgprot value to the PMB equivalent */
if (flags & _PAGE_CACHABLE) {
if (flags & _PAGE_WT)
pmb_flags = PMB_WT;
else
pmb_flags = PMB_C;
} else
pmb_flags = PMB_WT | PMB_UB;
pmb_flags |= PMB_C;
if ((flags & _PAGE_WT) == 0)
pmb_flags &= ~(PMB_WT | PMB_UB);
}
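/*
 * Net effect of the new conversion:
 *   _PAGE_CACHABLE set, _PAGE_WT clear -> PMB_C                (write-back)
 *   _PAGE_CACHABLE set, _PAGE_WT set   -> PMB_C|PMB_WT|PMB_UB  (write-through)
 *   _PAGE_CACHABLE clear               -> PMB_WT|PMB_UB        (uncached)
 */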
pmbp = NULL;
wanted = size;
again:
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
unsigned long flags;
if (size < pmb_sizes[i].size)
continue;
@ -197,18 +242,25 @@ again:
goto out;
}
set_pmb_entry(pmbe);
spin_lock_irqsave(&pmbe->lock, flags);
__set_pmb_entry(pmbe);
phys += pmb_sizes[i].size;
vaddr += pmb_sizes[i].size;
size -= pmb_sizes[i].size;
pmbe->size = pmb_sizes[i].size;
/*
* Link adjacent entries that span multiple PMB entries
* for easier tear-down.
*/
if (likely(pmbp))
if (likely(pmbp)) {
spin_lock(&pmbp->lock);
pmbp->link = pmbe;
spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
@ -218,16 +270,17 @@ again:
* pmb_sizes[i].size again.
*/
i--;
spin_unlock_irqrestore(&pmbe->lock, flags);
}
if (size >= 0x1000000)
if (size >= SZ_16M)
goto again;
return wanted - size;
out:
if (pmbp)
__pmb_unmap(pmbp);
pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
return err;
}
@ -237,24 +290,52 @@ void pmb_unmap(unsigned long addr)
struct pmb_entry *pmbe = NULL;
int i;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
if (test_bit(i, &pmb_map)) {
if (test_bit(i, pmb_map)) {
pmbe = &pmb_entry_list[i];
if (pmbe->vpn == addr)
break;
}
}
if (unlikely(!pmbe))
return;
read_unlock(&pmb_rwlock);
__pmb_unmap(pmbe);
pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
}
static void __pmb_unmap(struct pmb_entry *pmbe)
static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
BUG_ON(!test_bit(pmbe->entry, &pmb_map));
return (b->vpn == (a->vpn + a->size)) &&
(b->ppn == (a->ppn + a->size)) &&
(b->flags == a->flags);
}
static bool pmb_size_valid(unsigned long size)
{
int i;
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
if (pmb_sizes[i].size == size)
return true;
return false;
}
static int pmb_size_to_flags(unsigned long size)
{
int i;
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
if (pmb_sizes[i].size == size)
return pmb_sizes[i].flag;
return 0;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
do {
struct pmb_entry *pmblink = pmbe;
@ -265,52 +346,91 @@ static void __pmb_unmap(struct pmb_entry *pmbe)
* this entry in pmb_alloc() (even if we haven't filled
* it yet).
*
* Therefore, calling clear_pmb_entry() is safe as no
* Therefore, calling __clear_pmb_entry() is safe as no
* other mapping can be using that slot.
*/
clear_pmb_entry(pmbe);
__clear_pmb_entry(pmbe);
pmbe = pmblink->link;
pmb_free(pmblink);
} while (pmbe);
} while (pmbe && --depth);
}
#ifdef CONFIG_PMB_LEGACY
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
unsigned long flags;
if (unlikely(!pmbe))
return;
write_lock_irqsave(&pmb_rwlock, flags);
__pmb_unmap_entry(pmbe, depth);
write_unlock_irqrestore(&pmb_rwlock, flags);
}
static int pmb_apply_legacy_mappings(void)
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
static void __init pmb_notify(void)
{
unsigned int applied = 0;
int i;
pr_info("PMB: Preserving legacy mappings:\n");
pr_info("PMB: boot mappings:\n");
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
struct pmb_entry *pmbe;
if (!test_bit(i, pmb_map))
continue;
pmbe = &pmb_entry_list[i];
pr_info(" 0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
}
read_unlock(&pmb_rwlock);
}
/*
* Sync our software copy of the PMB mappings with those in hardware. The
* mappings in the hardware PMB were either set up by the bootloader or
* very early on by the kernel.
*/
static void __init pmb_synchronize(void)
{
struct pmb_entry *pmbp = NULL;
int i, j;
/*
* The following entries are setup by the bootloader.
* Run through the initial boot mappings, log the established
* ones, and blow away anything that falls outside of the valid
* PPN range. Specifically, we only care about existing mappings
* that impact the cached/uncached sections.
*
* Entry VPN PPN V SZ C UB
* --------------------------------------------------------
* 0 0xA0000000 0x00000000 1 64MB 0 0
* 1 0xA4000000 0x04000000 1 16MB 0 0
* 2 0xA6000000 0x08000000 1 16MB 0 0
* 9 0x88000000 0x48000000 1 128MB 1 1
* 10 0x90000000 0x50000000 1 128MB 1 1
* 11 0x98000000 0x58000000 1 128MB 1 1
* 13 0xA8000000 0x48000000 1 128MB 0 0
* 14 0xB0000000 0x50000000 1 128MB 0 0
* 15 0xB8000000 0x58000000 1 128MB 0 0
* Note that touching these can be a bit of a minefield; the boot
* loader can establish multi-page mappings with the same caching
* attributes, so we need to ensure that we aren't modifying a
* mapping that we're presently executing from, or may execute
* from in the case of straddling page boundaries.
*
* The only entries that we need are the ones that map the kernel
* at the cached and uncached addresses.
* In the future we will have to tidy up after the boot loader by
* jumping between the cached and uncached mappings and tearing
* down alternating mappings while executing from the other.
*/
for (i = 0; i < PMB_ENTRY_MAX; i++) {
for (i = 0; i < NR_PMB_ENTRIES; i++) {
unsigned long addr, data;
unsigned long addr_val, data_val;
unsigned long ppn, vpn;
unsigned long ppn, vpn, flags;
unsigned long irqflags;
unsigned int size;
struct pmb_entry *pmbe;
addr = mk_pmb_addr(i);
data = mk_pmb_data(i);
@ -330,110 +450,202 @@ static int pmb_apply_legacy_mappings(void)
/*
* Only preserve in-range mappings.
*/
if (pmb_ppn_in_range(ppn)) {
unsigned int size;
char *sz_str = NULL;
size = data_val & PMB_SZ_MASK;
sz_str = (size == PMB_SZ_16M) ? " 16MB":
(size == PMB_SZ_64M) ? " 64MB":
(size == PMB_SZ_128M) ? "128MB":
"512MB";
pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
(data_val & PMB_C) ? "" : "un");
applied++;
} else {
if (!pmb_ppn_in_range(ppn)) {
/*
* Invalidate anything out of bounds.
*/
__raw_writel(addr_val & ~PMB_V, addr);
__raw_writel(data_val & ~PMB_V, data);
}
}
return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
return 1;
}
#endif
int pmb_init(void)
{
int i;
unsigned long addr, data;
unsigned long ret;
jump_to_uncached();
/*
* Attempt to apply the legacy boot mappings if configured. If
* this is successful then we simply carry on with those and
* don't bother establishing additional memory mappings. Dynamic
* device mappings through pmb_remap() can still be bolted on
* after this.
*/
ret = pmb_apply_legacy_mappings();
if (ret == 0) {
back_to_cached();
return 0;
}
/*
* Sync our software copy of the PMB mappings with those in
* hardware. The mappings in the hardware PMB were either set up
* by the bootloader or very early on by the kernel.
*/
for (i = 0; i < PMB_ENTRY_MAX; i++) {
struct pmb_entry *pmbe;
unsigned long vpn, ppn, flags;
addr = PMB_DATA + (i << PMB_E_SHIFT);
data = __raw_readl(addr);
if (!(data & PMB_V))
writel_uncached(addr_val & ~PMB_V, addr);
writel_uncached(data_val & ~PMB_V, data);
continue;
if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
data &= ~PMB_WT;
#else
data &= ~(PMB_C | PMB_WT);
#endif
}
__raw_writel(data, addr);
ppn = data & PMB_PFN_MASK;
/*
* Update the caching attributes if necessary
*/
if (data_val & PMB_C) {
data_val &= ~PMB_CACHE_MASK;
data_val |= pmb_cache_flags();
flags = data & (PMB_C | PMB_WT | PMB_UB);
flags |= data & PMB_SZ_MASK;
writel_uncached(data_val, data);
}
addr = PMB_ADDR + (i << PMB_E_SHIFT);
data = __raw_readl(addr);
vpn = data & PMB_PFN_MASK;
size = data_val & PMB_SZ_MASK;
flags = size | (data_val & PMB_CACHE_MASK);
pmbe = pmb_alloc(vpn, ppn, flags, i);
WARN_ON(IS_ERR(pmbe));
if (IS_ERR(pmbe)) {
WARN_ON_ONCE(1);
continue;
}
spin_lock_irqsave(&pmbe->lock, irqflags);
for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
if (pmb_sizes[j].flag == size)
pmbe->size = pmb_sizes[j].size;
if (pmbp) {
spin_lock(&pmbp->lock);
/*
* Compare the previous entry against the current one to
* see if the entries span a contiguous mapping. If so,
* setup the entry links accordingly. Compound mappings
* are later coalesced.
*/
if (pmb_can_merge(pmbp, pmbe))
pmbp->link = pmbe;
spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
spin_unlock_irqrestore(&pmbe->lock, irqflags);
}
}
static void __init pmb_merge(struct pmb_entry *head)
{
unsigned long span, newsize;
struct pmb_entry *tail;
int i = 1, depth = 0;
span = newsize = head->size;
tail = head->link;
while (tail) {
span += tail->size;
if (pmb_size_valid(span)) {
newsize = span;
depth = i;
}
/* This is the end of the line.. */
if (!tail->link)
break;
tail = tail->link;
i++;
}
__raw_writel(0, PMB_IRMCR);
/*
* The merged page size must be valid.
*/
if (!pmb_size_valid(newsize))
return;
head->flags &= ~PMB_SZ_MASK;
head->flags |= pmb_size_to_flags(newsize);
head->size = newsize;
__pmb_unmap_entry(head->link, depth);
__set_pmb_entry(head);
}
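
/*
 * Worked example with a hypothetical layout: a compound mapping of four
 * linked 128MB entries. Walking the chain, span grows 128M -> 256M ->
 * 384M -> 512M; only the 512M total matches pmb_sizes[], so newsize ends
 * up as SZ_512M with depth = 3, the three tail entries are torn down and
 * the head entry is reprogrammed with PMB_SZ_512M covering the full range.
 */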
static void __init pmb_coalesce(void)
{
unsigned long flags;
int i;
write_lock_irqsave(&pmb_rwlock, flags);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
struct pmb_entry *pmbe;
if (!test_bit(i, pmb_map))
continue;
pmbe = &pmb_entry_list[i];
/*
* We're only interested in compound mappings
*/
if (!pmbe->link)
continue;
/*
* Nothing to do if it already uses the largest possible
* page size.
*/
if (pmbe->size == SZ_512M)
continue;
pmb_merge(pmbe);
}
write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
int i;
/*
* If the uncached mapping was constructed by the kernel, it will
* already be a reasonable size.
*/
if (uncached_size == SZ_16M)
return;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
struct pmb_entry *pmbe;
unsigned long flags;
if (!test_bit(i, pmb_map))
continue;
pmbe = &pmb_entry_list[i];
if (pmbe->vpn != uncached_start)
continue;
/*
* Found it, now resize it.
*/
spin_lock_irqsave(&pmbe->lock, flags);
pmbe->size = SZ_16M;
pmbe->flags &= ~PMB_SZ_MASK;
pmbe->flags |= pmb_size_to_flags(pmbe->size);
uncached_resize(pmbe->size);
__set_pmb_entry(pmbe);
spin_unlock_irqrestore(&pmbe->lock, flags);
}
read_unlock(&pmb_rwlock);
}
#endif
void __init pmb_init(void)
{
/* Synchronize software state */
pmb_synchronize();
/* Attempt to combine compound mappings */
pmb_coalesce();
#ifdef CONFIG_UNCACHED_MAPPING
/* Resize initial mappings, if necessary */
pmb_resize();
#endif
/* Log them */
pmb_notify();
writel_uncached(0, PMB_IRMCR);
/* Flush out the TLB */
i = __raw_readl(MMUCR);
i |= MMUCR_TI;
__raw_writel(i, MMUCR);
back_to_cached();
return 0;
__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
ctrl_barrier();
}
bool __in_29bit_mode(void)
@ -513,14 +725,21 @@ static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
if (state.event == PM_EVENT_ON &&
prev_state.event == PM_EVENT_FREEZE) {
struct pmb_entry *pmbe;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
if (test_bit(i, &pmb_map)) {
if (test_bit(i, pmb_map)) {
pmbe = &pmb_entry_list[i];
set_pmb_entry(pmbe);
}
}
read_unlock(&pmb_rwlock);
}
prev_state = state;
return 0;
}

arch/sh/mm/uncached.c (new file, 34 lines)

@ -0,0 +1,34 @@
#include <linux/init.h>
#include <asm/sizes.h>
#include <asm/page.h>
/*
* This is the offset of the uncached section from its cached alias.
*
* Legacy platforms handle trivial transitions between cached and
* uncached segments by making use of the 1:1 mapping relationship in
* 512MB lowmem, others via a special uncached mapping.
*
* Default value only valid in 29 bit mode, in 32bit mode this will be
* updated by the early PMB initialization code.
*/
unsigned long cached_to_uncached = SZ_512M;
unsigned long uncached_size = SZ_512M;
unsigned long uncached_start, uncached_end;
int virt_addr_uncached(unsigned long kaddr)
{
return (kaddr >= uncached_start) && (kaddr < uncached_end);
}
void __init uncached_init(void)
{
uncached_start = memory_end;
uncached_end = uncached_start + uncached_size;
}
void __init uncached_resize(unsigned long size)
{
uncached_size = size;
uncached_end = uncached_start + uncached_size;
}


@ -831,7 +831,7 @@ static int __devinit pvr2fb_common_init(void)
printk(KERN_NOTICE "fb%d: registering with SQ API\n", fb_info->node);
pvr2fb_map = sq_remap(fb_info->fix.smem_start, fb_info->fix.smem_len,
fb_info->fix.id, pgprot_val(PAGE_SHARED));
fb_info->fix.id, PAGE_SHARED);
printk(KERN_NOTICE "fb%d: Mapped video memory to SQ addr 0x%lx\n",
fb_info->node, pvr2fb_map);