[MIPS] 64-bit Sibyte kernels need DMA32.

Sibyte SoCs only have 32-bit PCI.  Due to the sparse use of the address
space, only the first 1GB of memory is mapped at physical addresses
below 1GB.  If a system has more than 1GB of memory, 32-bit DMA will
not be able to reach all of it.

For now this patch is good enough to keep Sibyte users happy, but it seems
that eventually something like swiotlb will be needed for Sibyte.
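
To illustrate (a hypothetical driver fragment, not part of this patch):
a device behind the Sibyte's 32-bit PCI must get its coherent buffers
from below 4GB physical, which is the pool ZONE_DMA32 provides:

	/* Hypothetical 32-bit PCI device on a >1GB Sibyte board. */
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;

	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	/*
	 * On a 64-bit kernel without ZONE_DMA32 this allocation may land
	 * above 4GB physical, out of the device's reach.  ZONE_DMA32 gives
	 * the allocator a zone that is guaranteed to lie below 4GB.
	 */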

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
commit cce335ae47
parent 940f6b48a1
Author: Ralf Baechle <ralf@linux-mips.org>
Date:   2007-11-03 02:05:43 +00:00

5 changed files with 83 additions and 37 deletions

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -515,6 +515,7 @@ config SIBYTE_SWARM
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_KGDB
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select ZONE_DMA32 if 64BIT
 
 config SIBYTE_LITTLESUR
 	bool "Sibyte BCM91250C2-LittleSur"
@@ -565,6 +566,7 @@ config SIBYTE_BIGSUR
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select ZONE_DMA32 if 64BIT
 
 config SNI_RM
 	bool "SNI RM200/300/400"
@@ -1664,6 +1666,9 @@ config ARCH_DISCONTIGMEM_ENABLE
 	  or have huge holes in the physical address space for other reasons.
 	  See <file:Documentation/vm/numa> for more.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 config ARCH_SPARSEMEM_ENABLE
 	bool
 	select SPARSEMEM_STATIC
@@ -1969,6 +1974,9 @@ config I8253
 config PCSPEAKER
 	bool
 
+config ZONE_DMA32
+	bool
+
 source "drivers/pcmcia/Kconfig"
 source "drivers/pci/hotplug/Kconfig"

diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -269,7 +269,7 @@ static void __init bootmem_init(void)
 static void __init bootmem_init(void)
 {
-	unsigned long reserved_end;
+	unsigned long init_begin, reserved_end;
 	unsigned long mapstart = ~0UL;
 	unsigned long bootmap_size;
 	int i;
@@ -342,6 +342,35 @@ static void __init bootmem_init(void)
 	 */
 	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
 					 min_low_pfn, max_low_pfn);
 
+
+	init_begin = PFN_UP(__pa_symbol(&__init_begin));
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		unsigned long start, end;
+
+		start = PFN_UP(boot_mem_map.map[i].addr);
+		end = PFN_DOWN(boot_mem_map.map[i].addr
+				+ boot_mem_map.map[i].size);
+
+		if (start <= init_begin)
+			start = init_begin;
+
+		if (start >= end)
+			continue;
+
+#ifndef CONFIG_HIGHMEM
+		if (end > max_low_pfn)
+			end = max_low_pfn;
+
+		/*
+		 * ... finally, is the area going away?
+		 */
+		if (end <= start)
+			continue;
+#endif
+
+		add_active_range(0, start, end);
+	}
+
 	/*
 	 * Register fully available low RAM pages with the bootmem allocator.
 	 */
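
As a worked example of the rounding in the loop above (illustrative
numbers only, assuming 4KB pages, i.e. PAGE_SHIFT == 12):

	/*
	 *   boot_mem_map entry: addr = 0x1800, size = 0x3000
	 *   start = PFN_UP(0x1800)            = 2  (partial page rounded up)
	 *   end   = PFN_DOWN(0x1800 + 0x3000) = 4  (partial page rounded down)
	 *
	 * so only the fully usable pages 2 and 3 are registered via
	 * add_active_range(0, 2, 4).
	 */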

diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -40,16 +40,38 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
 			     current_cpu_type() == CPU_R12000);
 }
 
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+	if (dev == NULL)
+		gfp |= __GFP_DMA;
+	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+		gfp |= __GFP_DMA;
+	else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+		gfp |= __GFP_DMA32;
+	else
+#endif
+		;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+	return gfp;
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
+	gfp = massage_gfp_flags(dev, gfp);
+
 	ret = (void *) __get_free_pages(gfp, get_order(size));
 
 	if (ret != NULL) {
@@ -67,11 +89,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
+	gfp = massage_gfp_flags(dev, gfp);
+
 	ret = (void *) __get_free_pages(gfp, get_order(size));
 
 	if (ret) {
@@ -343,7 +362,7 @@ int dma_supported(struct device *dev, u64 mask)
 	 * so we can't guarantee allocations that must be
 	 * within a tighter range than GFP_DMA..
 	 */
-	if (mask < 0x00ffffff)
+	if (mask < DMA_BIT_MASK(24))
 		return 0;
 
 	return 1;

diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -347,11 +347,8 @@ static int __init page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-#ifndef CONFIG_FLATMEM
-	unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
-	unsigned long i, j, pfn;
-#endif
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long lastpfn;
 
 	pagetable_init();
@@ -361,35 +358,27 @@ void __init paging_init(void)
 	kmap_coherent_init();
 
 #ifdef CONFIG_ZONE_DMA
-	if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
-		zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
-		zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
-	} else if (max_low_pfn < MAX_DMA_PFN)
-		zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
-	else
+	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 #endif
-	zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	lastpfn = max_low_pfn;
 
 #ifdef CONFIG_HIGHMEM
-	zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+	lastpfn = highend_pfn;
 
-	if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
 		printk(KERN_WARNING "This processor doesn't support highmem."
-		       " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
-		zones_size[ZONE_HIGHMEM] = 0;
+		       " %ldk highmem ignored\n",
+		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+		lastpfn = max_low_pfn;
 	}
 #endif
 
-#ifdef CONFIG_FLATMEM
-	free_area_init(zones_size);
-#else
-	pfn = min_low_pfn;
-	for (i = 0; i < MAX_NR_ZONES; i++)
-		for (j = 0; j < zones_size[i]; j++, pfn++)
-			if (!page_is_ram(pfn))
-				zholes_size[i]++;
-	free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+	free_area_init_nodes(max_zone_pfns);
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
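
The key semantic change in paging_init() above: the old code passed
per-zone sizes (and hand-counted holes) to free_area_init(); the new code
passes each zone's upper PFN bound and lets free_area_init_nodes()
intersect those bounds with the ranges registered via add_active_range()
in bootmem_init().  Roughly (illustrative values, 4KB pages, 1GB of low
memory starting at PFN 0):

	/* old: zones_size[ZONE_NORMAL]   = max_low_pfn - min_low_pfn;  0x40000 pages */
	/* new: max_zone_pfns[ZONE_NORMAL] = max_low_pfn;               PFN 0x40000   */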

diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -92,6 +92,7 @@
 #define MAX_DMA_ADDRESS		(PAGE_OFFSET + 0x01000000)
 #endif
 #define MAX_DMA_PFN	PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
+#define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))
 
 /* 8237 DMA controllers */
 #define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
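
Sanity check of the new constant (assuming the usual 4KB page size,
PAGE_SHIFT == 12):

	MAX_DMA32_PFN = 1UL << (32 - 12) = 0x100000

0x100000 pages x 4KB/page = 4GB, i.e. ZONE_DMA32 ends exactly at the
32-bit address boundary.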