
x86: Use new cache mode type in arch/x86/mm/init_64.c

Instead of directly using the cache mode bits in the pte, switch to
using the cache mode type.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-7-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Juergen Gross 2014-11-03 14:01:52 +01:00 committed by Thomas Gleixner
parent 1c64216be1
commit 2df58b6d35
1 changed file with 6 additions and 3 deletions

--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -337,12 +337,15 @@ pte_t * __init populate_extra_pte(unsigned long vaddr)
  * Create large page table mappings for a range of physical addresses.
  */
 static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
-					pgprot_t prot)
+					enum page_cache_mode cache)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
+	pgprot_t prot;
 
+	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
+			   pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
 	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
 	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
 		pgd = pgd_offset_k((unsigned long)__va(phys));
@@ -365,12 +368,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
 
 void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
 {
-	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
+	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
 }
 
 void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 {
-	__init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
+	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
 }
 
 /*
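
For readers unfamiliar with the cache mode type abstraction this series introduces, here is a minimal, self-contained userspace sketch (not the kernel implementation) of the idea: call sites name a symbolic cache mode and a single translation table turns it into the x86 PTE cache bits, instead of every caller hard-coding PWT/PCD patterns. The bit positions and table entries below assume the conventional non-PAT x86 encodings and are for illustration only.

/* cache_mode_sketch.c - illustrative only, not the kernel's table. */
#include <stdio.h>

/* x86 PTE cache-control bits (conventional positions). */
#define _PAGE_PWT (1UL << 3)	/* page write-through */
#define _PAGE_PCD (1UL << 4)	/* page cache disable */

/* Symbolic cache modes, in the spirit of the kernel's enum page_cache_mode. */
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB,		/* write-back (default) */
	_PAGE_CACHE_MODE_UC_MINUS,	/* uncached, may be overridden by MTRRs */
	_PAGE_CACHE_MODE_UC,		/* strictly uncached */
	_PAGE_CACHE_MODE_NUM
};

/*
 * One central table translates a cache mode into PTE bits, so callers
 * (like init_extra_mapping_wb/uc above) never touch raw bits themselves.
 */
static const unsigned long cachemode2protval[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB]       = 0,
	[_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
};

int main(void)
{
	/* A caller asks for "uncached" by type, not by raw bit pattern. */
	enum page_cache_mode cache = _PAGE_CACHE_MODE_UC;

	printf("UC -> PTE cache bits 0x%lx\n", cachemode2protval[cache]);
	return 0;
}

In the patch itself, __init_extra_mapping() performs the analogous translation via cachemode2pgprot() and then moves the result into the large-page bit layout with pgprot_4k_2_large() before building the PMD entries.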