alistair23-linux/arch/metag/mm/ioremap.c
James Hogan 373cd784d0 metag: Memory handling
Meta has instructions for accessing:
 - bytes        - GETB (1 byte)
 - words        - GETW (2 bytes)
 - doublewords  - GETD (4 bytes)
 - longwords    - GETL (8 bytes)

All accesses must be aligned. Unaligned accesses can be detected and
made to fault on Meta2; however, it isn't possible to fix up unaligned
writes, so we don't bother fixing up reads either.

This patch adds metag memory handling code including:
 - I/O memory (io.h, ioremap.c): In fact any virtual memory can be
   accessed with these helpers. A part of the non-MMUable address space
   is used for memory-mapped I/O. The ioremap() function is implemented
   one-to-one for non-MMUable addresses.
 - User memory (uaccess.h, usercopy.c): User memory is directly
   accessible from privileged code.
 - Kernel memory (maccess.c): probe_kernel_write() needs to be
   overridden to use the I/O functions when doing a simple aligned
   write to non-writecombined memory, otherwise the write may be split
   by the generic version (see the sketch after this list).
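
The maccess.c override itself is not shown on this page. As a rough
illustration of the idea only, assuming the standard writeb()/writew()/writel()
accessors and the generic __probe_kernel_write() fallback that mm/maccess.c
provided in kernels of this era, and deliberately omitting the
non-writecombined check, a sketch might look like:

/*
 * Hedged sketch only -- not the real arch/metag/mm/maccess.c. The point
 * is the shape of the override: naturally aligned small writes go out as
 * a single MMIO access so they cannot be split; everything else takes
 * the generic byte-wise path.
 */
#include <linux/io.h>
#include <linux/types.h>
#include <linux/uaccess.h>

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	unsigned long ldst = (unsigned long)dst;
	unsigned long lsrc = (unsigned long)src;
	void __iomem *iodst = (__force void __iomem *)dst;

	/* Require src and dst to be naturally aligned to the access size. */
	if (size && !(ldst & (size - 1)) && !(lsrc & (size - 1))) {
		if (size == 1) {
			writeb(*(const u8 *)src, iodst);
			return 0;
		}
		if (size == 2) {
			writew(*(const u16 *)src, iodst);
			return 0;
		}
		if (size == 4) {
			writel(*(const u32 *)src, iodst);
			return 0;
		}
	}

	/* Fall back to the generic copy, which may split the write. */
	return __probe_kernel_write(dst, src, size);
}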

Note that because a portion of the virtual address space is non-MMUable,
and therefore always maps directly to the physical address space,
metag-specific I/O functions are made available (metag_in32, metag_out32,
etc.). These cast the address argument to a pointer so that they can be
used with raw physical addresses. These accessors are only to be used for
accessing fixed core Meta architecture registers in the non-MMU region,
and not for any SoC/peripheral registers.
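
The accessors themselves live in the architecture's io.h, which is not
part of this page. As a hedged sketch of the casting trick described
above (the exact accessor used for the volatile load/store is an
assumption here), they could be written along these lines:

/*
 * Hedged sketch only -- io.h is not shown here, so treat these bodies
 * as an illustration of "cast the integer address to an __iomem
 * pointer", not as the real definitions.
 */
#include <linux/io.h>
#include <linux/types.h>

static inline u32 metag_in32(unsigned long addr)
{
	return readl((volatile void __iomem *)addr);
}

static inline void metag_out32(u32 data, unsigned long addr)
{
	writel(data, (volatile void __iomem *)addr);
}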

Signed-off-by: James Hogan <james.hogan@imgtec.com>
2013-03-02 20:09:19 +00:00

/*
 * Re-map IO memory to kernel address space so that we can access it.
 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
 * window (that is, all memory-mapped I/O devices).
 *
 * Copyright (C) 1995,1996 Linus Torvalds
 *
 * Meta port based on CRIS-port by Axis Communications AB
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/pgtable.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	unsigned long addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Custom region addresses are accessible and uncached by default. */
	if (phys_addr >= LINSYSCUSTOM_BASE &&
	    phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
		return (__force void __iomem *) phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
	prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
			_PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
			flags);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (unsigned long) area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		vunmap((void *) addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE &&
	    (__force unsigned long)addr < (LINSYSCUSTOM_BASE +
					   LINSYSCUSTOM_LIMIT))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (unlikely(!p)) {
		pr_err("iounmap: bad address %p\n", addr);
		return;
	}
	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
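
As a usage note, a driver would normally reach this code through the
generic ioremap()/iounmap() wrappers rather than calling
__ioremap()/__iounmap() directly. The example below is a hedged sketch:
the peripheral base address and register offset are invented, and it
assumes the usual arch pattern where ioremap() forwards to __ioremap()
with flags of 0 and iounmap() forwards to __iounmap().

#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical SoC peripheral -- the address and offset are invented. */
#define EXAMPLE_PHYS_BASE	0x02004000UL
#define EXAMPLE_REG_CTRL	0x10

static int example_poke_device(void)
{
	void __iomem *base;

	base = ioremap(EXAMPLE_PHYS_BASE, 0x1000);	/* -> __ioremap() */
	if (!base)
		return -ENOMEM;

	/* The returned cookie works with the normal MMIO accessors. */
	writel(0x1, base + EXAMPLE_REG_CTRL);

	iounmap(base);					/* -> __iounmap() */
	return 0;
}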