
RISC-V Fixes for 5.6-rc4

This tag contains a handful of RISC-V related fixes that I've collected and
 would like to target for 5.6-rc4:
 
 * A fix to set up the PMPs on boot, which allows the kernel to access memory on
   systems that don't set up permissive PMPs before getting to Linux.  This only
   affects machine-mode kernels, which currently means only NOMMU kernels.
 * A fix to avoid enabling supervisor-mode interrupts when running in
   machine-mode, also only for NOMMU kernels.
 * A pair of fixes to our KASAN support to avoid corrupting memory.
 * A gitignore fix.
 
 This boots on QEMU's virt board for me.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAl5UZC4THHBhbG1lckBk
 YWJiZWx0LmNvbQAKCRAuExnzX7sYiYR3D/9YOz4JMp1rGySxxBctWwiO3WyPDcce
 y1+QWizzto7sPl6wrQCO2mehWRKzjWVa1fBovgE/NewEIjaFd5sbhB/JZ2FUiUCU
 OJo8j8TrBp3CvHIlfYKSuZrRHwFUt4KeLo22KoGpTQDhhpDjgSAwnUSjfykEEiLc
 xAtSfoHUgrYBFNe78J9Yz61gc5zNYb7iTsgf1Av6S2hiwwlLRtqUEtoO+dK9uo8f
 hIadaO8UWGJU+Zz1JN7tboP/rixRdNUCbacoeRLQ8cmo3vuNfHH9E1i15QiSv1lx
 xCDDk9imZN1G2kL26Irgivg0eh8NRczfabfKSnMrEEsCvYG0Mo3nwvezPTJvDqIB
 7nFpxUj2jDu/Q0t7rgANs61tRy0fyPA2q/Hbn+IPn4cv/taUaSdQCr0sHBaPN2D7
 MnXYtXNYwqPGqK4OI25qXkIPOlgbJCfUa9C3evW2lq7L/oK5WQzhfXHBKL+SYwWI
 5nQLRewDj8e7KJBAY6/ODJ6QU83mQxvueFQG16oisYdDE+crdWxJ6GhmSWrF1B8y
 sziCMHiWLt5GNCoHf47esg44Wj824aG4ZNmJkNgSwv2YBTNgKDbU7ejue8x/ZEls
 ZmBEPFw88QenOMUkCEwcsmIJcVuxLqGAZe57ROpHQ/uLiO64pu5+unpWHkNihyVQ
 jAUWa/iGFezBBg==
 =r0RY
 -----END PGP SIGNATURE-----

Merge tag 'riscv-for-linux-5.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:
 "This contains a handful of RISC-V related fixes that I've collected
  and would like to target for 5.6-rc4:

   - A fix to set up the PMPs on boot, which allows the kernel to access
     memory on systems that don't set up permissive PMPs before getting
     to Linux. This only affects machine-mode kernels, which currently
     means only NOMMU kernels.

   - A fix to avoid enabling supervisor-mode interrupts when running in
     machine-mode, also only for NOMMU kernels.

   - A pair of fixes to our KASAN support to avoid corrupting memory.

   - A gitignore fix.

  This boots on QEMU's virt board for me"

* tag 'riscv-for-linux-5.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: adjust the indent
  riscv: allocate a complete page size for each page table
  riscv: Fix gitignore
  RISC-V: Don't enable all interrupts in trap_init()
  riscv: set pmp configuration if kernel is running in M-mode
commit c5f8689118
Linus Torvalds, 2020-02-25 10:14:39 -08:00

5 changed files with 53 additions and 24 deletions

arch/riscv/boot/.gitignore

@@ -1,2 +1,4 @@
 Image
 Image.gz
+loader
+loader.lds

arch/riscv/include/asm/csr.h

@@ -72,6 +72,16 @@
 #define EXC_LOAD_PAGE_FAULT 13
 #define EXC_STORE_PAGE_FAULT 15
 
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
 /* symbolic CSR names: */
 #define CSR_CYCLE 0xc00
 #define CSR_TIME 0xc01
@@ -100,6 +110,8 @@
 #define CSR_MCAUSE 0x342
 #define CSR_MTVAL 0x343
 #define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
 #define CSR_MHARTID 0xf14
 
 #ifdef CONFIG_RISCV_M_MODE
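
For reference, each pmpcfg CSR packs one 8-bit configuration byte per PMP region: PMP_A (0x18) is the two-bit address-matching field (TOR, NA4 and NAPOT are its non-zero encodings) and PMP_L is the lock bit. A minimal stand-alone C sketch of the byte the boot code composes below, reusing the constants above (illustration only, not kernel code):

#include <stdio.h>

#define PMP_R           0x01
#define PMP_W           0x02
#define PMP_X           0x04
#define PMP_A_NAPOT     0x18

int main(void)
{
        /* The byte head.S writes into pmp0cfg (the low byte of
         * CSR_PMPCFG0): NAPOT address matching plus read/write/execute
         * permission, lock bit clear. */
        unsigned char cfg = PMP_A_NAPOT | PMP_R | PMP_W | PMP_X;

        printf("pmp0cfg = %#x\n", cfg);  /* prints 0x1f */
        return 0;
}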

arch/riscv/kernel/head.S

@@ -58,6 +58,12 @@ _start_kernel:
         /* Reset all registers except ra, a0, a1 */
         call reset_regs
 
+        /* Setup a PMP to permit access to all of memory. */
+        li a0, -1
+        csrw CSR_PMPADDR0, a0
+        li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+        csrw CSR_PMPCFG0, a0
+
         /*
          * The hartid in a0 is expected later on, and we have no firmware
          * to hand it to us.
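
Why writing -1 to pmpaddr0 opens up the whole address space: in NAPOT mode the run of trailing 1 bits in pmpaddr encodes the region size as 2^(ones + 3) bytes (pmpaddr holds address bits [XLEN+1:2]), so an all-ones value matches everything. A hedged stand-alone C sketch of that decoding, per the privileged spec (illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Return log2 of the size of a NAPOT PMP region. */
static unsigned int napot_size_log2(uint64_t pmpaddr)
{
        unsigned int trailing_ones = 0;

        while (pmpaddr & 1) {
                pmpaddr >>= 1;
                trailing_ones++;
        }
        /* pmpaddr drops the low two address bits, and NAPOT reserves
         * one more bit, so the region spans 2^(ones + 3) bytes. */
        return trailing_ones + 3;
}

int main(void)
{
        /* head.S writes -1: all 64 bits set, so log2(size) = 67,
         * i.e. larger than any physical address -- match everything. */
        printf("log2(region size) = %u\n", napot_size_log2(UINT64_MAX));
        return 0;
}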

arch/riscv/kernel/traps.c

@@ -156,6 +156,6 @@ void __init trap_init(void)
         csr_write(CSR_SCRATCH, 0);
         /* Set the exception vector address */
         csr_write(CSR_TVEC, &handle_exception);
-        /* Enable all interrupts */
-        csr_write(CSR_IE, -1);
+        /* Enable interrupts */
+        csr_write(CSR_IE, IE_SIE | IE_EIE);
 }
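
The old csr_write(CSR_IE, -1) set every enable bit in the interrupt-enable CSR, which is how a machine-mode kernel ended up with supervisor-mode interrupt enables (and the timer enable) turned on before anything could service them; the fix writes only the named enables. A small sketch, assuming the supervisor-mode bit positions from the privileged spec (software = 1, timer = 5, external = 9); the IE_* names mirror the kernel's, which remap to the machine-mode bits on CONFIG_RISCV_M_MODE builds:

#include <stdio.h>

#define IE_SIE (1UL << 1)       /* software interrupt enable */
#define IE_TIE (1UL << 5)       /* timer interrupt enable */
#define IE_EIE (1UL << 9)       /* external interrupt enable */

int main(void)
{
        unsigned long before = -1UL;            /* old: every bit set */
        unsigned long after = IE_SIE | IE_EIE;  /* new: just these two */

        /* The old value also enabled the timer interrupt, which could
         * fire before a handler was ready. */
        printf("timer enabled before fix: %d\n", !!(before & IE_TIE));
        printf("timer enabled after fix:  %d\n", !!(after & IE_TIE));
        return 0;
}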

arch/riscv/mm/kasan_init.c

@@ -19,18 +19,20 @@ asmlinkage void __init kasan_early_init(void)
         for (i = 0; i < PTRS_PER_PTE; ++i)
                 set_pte(kasan_early_shadow_pte + i,
                         mk_pte(virt_to_page(kasan_early_shadow_page),
-                               PAGE_KERNEL));
+                                PAGE_KERNEL));
 
         for (i = 0; i < PTRS_PER_PMD; ++i)
                 set_pmd(kasan_early_shadow_pmd + i,
-                        pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
-                                __pgprot(_PAGE_TABLE)));
+                        pfn_pmd(PFN_DOWN
+                                (__pa((uintptr_t) kasan_early_shadow_pte)),
+                                __pgprot(_PAGE_TABLE)));
 
         for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
              i += PGDIR_SIZE, ++pgd)
                 set_pgd(pgd,
-                        pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
-                                __pgprot(_PAGE_TABLE)));
+                        pfn_pgd(PFN_DOWN
+                                (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+                                __pgprot(_PAGE_TABLE)));
 
         /* init for swapper_pg_dir */
         pgd = pgd_offset_k(KASAN_SHADOW_START);
@@ -38,37 +40,43 @@ asmlinkage void __init kasan_early_init(void)
         for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
              i += PGDIR_SIZE, ++pgd)
                 set_pgd(pgd,
-                        pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
-                                __pgprot(_PAGE_TABLE)));
+                        pfn_pgd(PFN_DOWN
+                                (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+                                __pgprot(_PAGE_TABLE)));
 
         flush_tlb_all();
 }
 
 static void __init populate(void *start, void *end)
 {
-        unsigned long i;
+        unsigned long i, offset;
         unsigned long vaddr = (unsigned long)start & PAGE_MASK;
         unsigned long vend = PAGE_ALIGN((unsigned long)end);
         unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+        unsigned long n_ptes =
+            ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
         unsigned long n_pmds =
-                (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
-                                n_pages / PTRS_PER_PTE;
+            ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
+        pte_t *pte =
+            memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+        pmd_t *pmd =
+            memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
         pgd_t *pgd = pgd_offset_k(vaddr);
-        pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
-        pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 
         for (i = 0; i < n_pages; i++) {
                 phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
-                set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+                set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
         }
 
-        for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
-                set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+        for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+                set_pmd(&pmd[i],
+                        pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
                                 __pgprot(_PAGE_TABLE)));
 
-        for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
-                set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+        for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+                set_pgd(&pgd[i],
+                        pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
                                 __pgprot(_PAGE_TABLE)));
 
         flush_tlb_all();
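
This is the memory corruption called out in the pull message: the old populate() sized its allocations by entry count (n_pmds * sizeof(pmd_t), n_pages * sizeof(pte_t)) but installed them a whole table at a time, so the installed page tables extended past the end of the allocation. The rewrite rounds each level up to whole tables first. A stand-alone sketch of the new rounding arithmetic, assuming Sv39 constants (4 KiB pages, 8-byte entries):

#include <stdio.h>

#define PTRS_PER_PTE 512        /* 4 KiB page / 8-byte entry */

/* Whole page tables needed to hold n_entries entries; mirrors
 * ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE above. */
static unsigned long n_tables(unsigned long n_entries)
{
        return ((n_entries + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
}

int main(void)
{
        printf("%lu\n", n_tables(1));    /* 1 */
        printf("%lu\n", n_tables(511));  /* 1 */
        printf("%lu\n", n_tables(512));  /* 2: rounds to a spare table */
        printf("%lu\n", n_tables(513));  /* 2 */
        return 0;
}
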
@@ -81,7 +89,8 @@ void __init kasan_init(void)
         unsigned long i;
 
         kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-                                    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+                                    (void *)kasan_mem_to_shadow((void *)
+                                                                VMALLOC_END));
 
         for_each_memblock(memory, reg) {
                 void *start = (void *)__va(reg->base);
@@ -90,14 +99,14 @@ void __init kasan_init(void)
                 if (start >= end)
                         break;
 
-                populate(kasan_mem_to_shadow(start),
-                         kasan_mem_to_shadow(end));
+                populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
         };
 
         for (i = 0; i < PTRS_PER_PTE; i++)
                 set_pte(&kasan_early_shadow_pte[i],
                         mk_pte(virt_to_page(kasan_early_shadow_page),
-                               __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+                                __pgprot(_PAGE_PRESENT | _PAGE_READ |
+                                         _PAGE_ACCESSED)));
 
         memset(kasan_early_shadow_page, 0, PAGE_SIZE);
         init_task.kasan_depth = 0;
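
For context on the addresses being wired up here: generic KASAN maps each 8-byte granule of kernel address space to one shadow byte, so kasan_mem_to_shadow() is just a shift plus a constant offset, and KASAN_SHADOW_START/END are the covered range pushed through that formula. A sketch of the usual translation; the offset below is a placeholder, not the RISC-V value:

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3               /* 8 bytes per shadow byte */
#define KASAN_SHADOW_OFFSET 0xdfff000000000000UL /* placeholder value */

static uintptr_t mem_to_shadow(uintptr_t addr)
{
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
        uintptr_t addr = 0xffffffe000000000UL;  /* an example address */

        printf("shadow(%#lx) = %#lx\n", (unsigned long)addr,
               (unsigned long)mem_to_shadow(addr));
        return 0;
}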