diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 208a2d465b92..4bfb9ffbdbc1 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -51,6 +51,9 @@ ffc00000	ffefffff	DMA memory mapping region.  Memory returned
 ff000000	ffbfffff	Reserved for future expansion of DMA
 				mapping region.
 
+fee00000	feffffff	Mapping of PCI I/O space. This is a static
+				mapping within the vmalloc space.
+
 VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 				Memory returned by vmalloc/ioremap
 				will be dynamically placed in this region.
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 815c669fec0a..8f4db67533e5 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -113,11 +113,19 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 #define __iowmb()		do { } while (0)
 #endif
 
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE	0xfee00000
+
+extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
 #ifdef CONFIG_NEED_MACH_IO_H
 #include <mach/io.h>
+#elif defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT	((resource_size_t)0xfffff)
+#define __io(a)		__typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
 #else
 #define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
 #endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index a6efcdd6fd25..195ac2f9d3d3 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -9,6 +9,9 @@
  *
  *  Page table mapping constructs and function prototypes
  */
+#ifndef __ASM_MACH_MAP_H
+#define __ASM_MACH_MAP_H
+
 #include <asm/io.h>
 
 struct map_desc {
@@ -34,6 +37,8 @@ struct map_desc {
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
+extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller);
 
 struct mem_type;
 extern const struct mem_type *get_mem_type(unsigned int type);
@@ -44,4 +49,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,
 			const struct mem_type *mtype);
 #else
 #define iotable_init(map,num)	do { } while (0)
+#define vm_reserve_area_early(a,s,c)	do { } while (0)
+#endif
+
 #endif
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 26c511fddf8f..df818876fa31 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -11,6 +11,7 @@
 #ifndef __ASM_MACH_PCI_H
 #define __ASM_MACH_PCI_H
 
+
 struct pci_sys_data;
 struct pci_ops;
 struct pci_bus;
@@ -54,6 +55,15 @@ struct pci_sys_data {
  */
 void pci_common_init(struct hw_pci *);
 
+/*
+ * Setup early fixed I/O mapping.
+ */
+#if defined(CONFIG_PCI)
+extern void pci_map_io_early(unsigned long pfn);
+#else
+static inline void pci_map_io_early(unsigned long pfn) {}
+#endif
+
 /*
  * PCI controllers
  */
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 25552508c3fd..c3165f0fef63 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 
 #include <asm/mach-types.h>
+#include <asm/mach/map.h>
 #include <asm/mach/pci.h>
 
 static int debug_pci;
@@ -627,3 +628,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 	return 0;
 }
+
+void __init pci_map_io_early(unsigned long pfn)
+{
+	struct map_desc pci_io_desc = {
+		.virtual	= PCI_IO_VIRT_BASE,
+		.type		= MT_DEVICE,
+		.length		= SZ_64K,
+	};
+
+	pci_io_desc.pfn = pfn;
+	iotable_init(&pci_io_desc, 1);
+}
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 4f55f5062ab7..8727802f8661 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,6 +36,7 @@
 #include <asm/system_info.h>
 
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 #include "mm.h"
 
 int ioremap_page(unsigned long virt, unsigned long phys,
@@ -383,3 +384,16 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 	arch_iounmap(io_addr);
 }
 EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cf4528d51774..714a7fd99ca3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},	
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -783,14 +784,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(md->pfn); 
-		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; 
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
@@ -808,14 +822,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -864,6 +871,28 @@
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1147,6 +1176,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer.  This also ensures that
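
For platform code, usage would look roughly like the sketch below. This is a
minimal, hypothetical example, not part of the patch: the foo_* names and the
0x90000000 physical window are made up for illustration. A machine that needs
port I/O before the PCI host is probed establishes the first 64K slot
statically from its ->map_io() hook with pci_map_io_early(); otherwise the
host controller remaps a 64K slot of the fixed virtual area onto its physical
I/O aperture at setup time with pci_ioremap_io().

#include <linux/init.h>
#include <linux/sizes.h>

#include <asm/io.h>		/* PCI_IO_VIRT_BASE, pci_ioremap_io() */
#include <asm/memory.h>		/* __phys_to_pfn() */
#include <asm/mach/pci.h>	/* pci_map_io_early(), struct pci_sys_data */

/* hypothetical CPU physical address of the PCI I/O window */
#define FOO_PCI_IO_PHYS		0x90000000

/*
 * Option A: static mapping from the machine's ->map_io() hook, for
 * platforms that must touch I/O ports before the PCI host is up.
 */
static void __init foo_map_io(void)
{
	pci_map_io_early(__phys_to_pfn(FOO_PCI_IO_PHYS));
}

/*
 * Option B: map at setup time instead. Slot nr of the fixed virtual
 * area gets bus nr's 64K window; the 1MB IO_SPACE_LIMIT leaves room
 * for 16 such slots below PCI_IO_VIRT_BASE + SZ_1M.
 */
static int __init foo_pci_setup(int nr, struct pci_sys_data *sys)
{
	pci_ioremap_io(nr * SZ_64K, FOO_PCI_IO_PHYS + nr * SZ_64K);

	/* ... request mem/io resources for this bus ... */
	return 1;
}

With either option in place, inb()/outb() port numbers are translated by the
new __io() definition, __typesafe_io(PCI_IO_VIRT_BASE + (port & IO_SPACE_LIMIT)),
so PCI platforms that do not select CONFIG_NEED_MACH_IO_H no longer need a
mach/io.h at all.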