
ARM: add permission annotations to MT_MEMORY* mapping types

Document the permissions which the various MT_MEMORY* mapping types
will provide.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Russell King 2013-10-24 10:26:40 +01:00
parent 1fd15b879d
commit 2e2c9de207
8 changed files with 38 additions and 36 deletions
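
For illustration only (not part of this commit): with the renamed types, a platform static mapping now spells out its permissions in the type name itself. The sketch below is hypothetical; EXAMPLE_SRAM_VA and EXAMPLE_SRAM_PA are placeholder addresses, while struct map_desc, iotable_init() and MT_MEMORY_RWX are the real interfaces touched by this patch.

/*
 * Hypothetical example, not from this commit: a board file describing
 * one RAM window that must be readable, writable and executable.
 * EXAMPLE_SRAM_VA and EXAMPLE_SRAM_PA are placeholder addresses.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/mach/map.h>

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual = EXAMPLE_SRAM_VA,
		.pfn     = __phys_to_pfn(EXAMPLE_SRAM_PA),
		.length  = PAGE_SIZE,
		.type    = MT_MEMORY_RWX,	/* was MT_MEMORY before this patch */
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}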

arch/arm/include/asm/mach/map.h

@@ -22,18 +22,20 @@ struct map_desc {
 };

 /* types 0-3 are defined in asm/io.h */
-#define MT_UNCACHED 4
-#define MT_CACHECLEAN 5
-#define MT_MINICLEAN 6
-#define MT_LOW_VECTORS 7
-#define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
-#define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
-#define MT_MEMORY_DTCM 12
-#define MT_MEMORY_ITCM 13
-#define MT_MEMORY_SO 14
-#define MT_MEMORY_DMA_READY 15
+enum {
+	MT_UNCACHED = 4,
+	MT_CACHECLEAN,
+	MT_MINICLEAN,
+	MT_LOW_VECTORS,
+	MT_HIGH_VECTORS,
+	MT_MEMORY_RWX,
+	MT_ROM,
+	MT_MEMORY_RWX_NONCACHED,
+	MT_MEMORY_RW_DTCM,
+	MT_MEMORY_RWX_ITCM,
+	MT_MEMORY_RW_SO,
+	MT_MEMORY_DMA_READY,
+};

 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);

arch/arm/kernel/tcm.c

@@ -52,7 +52,7 @@ static struct map_desc dtcm_iomap[] __initdata = {
 		.virtual = DTCM_OFFSET,
 		.pfn = __phys_to_pfn(DTCM_OFFSET),
 		.length = 0,
-		.type = MT_MEMORY_DTCM
+		.type = MT_MEMORY_RW_DTCM
 	}
 };
@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
 		.virtual = ITCM_OFFSET,
 		.pfn = __phys_to_pfn(ITCM_OFFSET),
 		.length = 0,
-		.type = MT_MEMORY_ITCM
+		.type = MT_MEMORY_RWX_ITCM,
 	}
 };

arch/arm/mach-at91/setup.c

@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
 	desc->pfn = __phys_to_pfn(base);
 	desc->length = length;
-	desc->type = MT_MEMORY_NONCACHED;
+	desc->type = MT_MEMORY_RWX_NONCACHED;

 	pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
 		base, length, desc->virtual);

arch/arm/mach-omap2/io.c

@@ -244,7 +244,7 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
 		.virtual = OMAP4_SRAM_VA,
 		.pfn = __phys_to_pfn(OMAP4_SRAM_PA),
 		.length = PAGE_SIZE,
-		.type = MT_MEMORY_SO,
+		.type = MT_MEMORY_RW_SO,
 	},
 #endif
@@ -282,7 +282,7 @@ static struct map_desc omap54xx_io_desc[] __initdata = {
 		.virtual = OMAP4_SRAM_VA,
 		.pfn = __phys_to_pfn(OMAP4_SRAM_PA),
 		.length = PAGE_SIZE,
-		.type = MT_MEMORY_SO,
+		.type = MT_MEMORY_RW_SO,
 	},
 #endif
 };

arch/arm/mach-omap2/omap4-common.c

@@ -88,7 +88,7 @@ void __init omap_barriers_init(void)
 	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
 	dram_io_desc[0].pfn = __phys_to_pfn(paddr);
 	dram_io_desc[0].length = size;
-	dram_io_desc[0].type = MT_MEMORY_SO;
+	dram_io_desc[0].type = MT_MEMORY_RW_SO;
 	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
 	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
 	sram_sync = (void __iomem *) OMAP4_SRAM_VA;

arch/arm/mach-ux500/setup.h

@@ -43,7 +43,7 @@ extern void ux500_timer_init(void);
 		.virtual = IO_ADDRESS(x),	\
 		.pfn = __phys_to_pfn(x),	\
 		.length = sz,			\
-		.type = MT_MEMORY,		\
+		.type = MT_MEMORY_RWX,		\
 	}
 extern struct smp_operations ux500_smp_ops;

arch/arm/mm/ioremap.c

@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 	unsigned int mtype;

 	if (cached)
-		mtype = MT_MEMORY;
+		mtype = MT_MEMORY_RWX;
 	else
-		mtype = MT_MEMORY_NONCACHED;
+		mtype = MT_MEMORY_RWX_NONCACHED;

 	return __arm_ioremap_caller(phys_addr, size, mtype,
 			__builtin_return_address(0));

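For context, a hedged sketch (not from this commit) of how the function changed above is typically used: code that must execute from a freshly mapped region, such as SRAM, asks for a cached executable mapping, which after this patch explicitly means MT_MEMORY_RWX. The helper name and parameters below are hypothetical.

/* Hypothetical helper: map SRAM so code can be copied in and executed. */
static void __iomem * __init example_map_sram_exec(phys_addr_t sram_phys,
						   size_t sram_size)
{
	/* cached == true now selects MT_MEMORY_RWX instead of MT_MEMORY */
	return __arm_ioremap_exec(sram_phys, sram_size, true);
}
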
arch/arm/mm/mmu.c

@@ -287,7 +287,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1 = PMD_TYPE_TABLE,
 		.domain = DOMAIN_USER,
 	},
-	[MT_MEMORY] = {
+	[MT_MEMORY_RWX] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
@@ -297,26 +297,26 @@ static struct mem_type mem_types[] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_NONCACHED] = {
+	[MT_MEMORY_RWX_NONCACHED] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_DTCM] = {
+	[MT_MEMORY_RW_DTCM] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_ITCM] = {
+	[MT_MEMORY_RWX_ITCM] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1 = PMD_TYPE_TABLE,
 		.domain = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_SO] = {
+	[MT_MEMORY_RW_SO] = {
 		.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1 = PMD_TYPE_TABLE,
@@ -487,11 +487,11 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
 		}
 	}
@@ -502,15 +502,15 @@ static void __init build_mem_type_table(void)
 	if (cpu_arch >= CPU_ARCH_ARMv6) {
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/* Non-cacheable Normal is XCB = 001 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_BUFFERED;
 		} else {
 			/* For both ARMv6 and non-TEX-remapping ARMv7 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 				PMD_SECT_TEX(1);
 		}
 	} else {
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 	}

 #ifdef CONFIG_ARM_LPAE
@@ -543,10 +543,10 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;

 	switch (cp->pmd) {