arm64 fixes for -rc5

- Fix module loading when KASLR is configured but disabled at runtime

- Fix accidental IPI when mapping user executable pages

- Ensure hyp-stub and KVM world switch code cannot be kprobed
-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAlxUWvEACgkQt6xw3ITB
YzTNGwgAgAk1D4xfe01rasMK8dnogIMNbpxeeAuiwwFuXq/lBqo2UDX8UMt6qW9k
b0rLpzyVelhPRJ3lQ90XvIv279cFBjHQT42YyVLlAnzkYz89kPKZweE7cWetO3sV
kB7uJIaCfstGAp13p2ztN1KzHeIFKxk14gUa9jgmVdbHdJt+L5WdNUExs4+0jgnq
lxBPWwvluzEc6mmpniIpF7em+svIci5M4al2gjxKVO7DI5slphOEZ4+VSGeSDxTI
jOzTilS8UfldEnwjndl/hKes9r65afeLSDXvkJTTpfXB9jnKS54FJzy1QCibSRLT
SHY9xkA3VEgJBifmpCEuJoNs2XaBFQ==
=Eh1r
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Although we're still debugging a few minor arm64-specific issues in
  mainline, I didn't want to hold this lot up in the meantime.

  We've got an additional KASLR fix after the previous one wasn't quite
  complete, a fix for a performance regression when mapping executable
  pages into userspace and some fixes for kprobe blacklisting. All
  candidates for stable.

  Summary:

   - Fix module loading when KASLR is configured but disabled at runtime

   - Fix accidental IPI when mapping user executable pages

   - Ensure hyp-stub and KVM world switch code cannot be kprobed"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hibernate: Clean the __hyp_text to PoC after resume
  arm64: hyp-stub: Forbid kprobing of the hyp-stub
  arm64: kprobe: Always blacklist the KVM world-switch code
  arm64: kaslr: ensure randomized quantities are clean also when kaslr is off
  arm64: Do not issue IPIs for user executable ptes
commit 8b050fe42d
Author: Linus Torvalds
Date:   2019-02-01 16:54:25 -08:00

5 changed files with 14 additions and 5 deletions

@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void)
 		dcache_clean_range(__idmap_text_start, __idmap_text_end);
 
 		/* Clean kvm setup code to PoC? */
-		if (el2_reset_needed())
+		if (el2_reset_needed()) {
 			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
+			dcache_clean_range(__hyp_text_start, __hyp_text_end);
+		}
 
 		/* make the crash dump kernel image protected again */
 		crash_post_resume();
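The fix brackets the existing call and additionally cleans the whole __hyp_text region, so that EL2 code executed with the MMU off after resume fetches current instructions rather than stale ones. For reference, dcache_clean_range() is, as far as I can tell, a file-local convenience macro in the hibernate code wrapping the arm64 clean-to-PoC primitive (worth verifying against your tree):

/* Clean a [start, end) virtual range to the Point of Coherency, so that
 * code running with the MMU/caches off sees the up-to-date contents. */
#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))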

@@ -28,6 +28,8 @@
 #include <asm/virt.h>
 
 	.text
+	.pushsection	.hyp.text, "ax"
+
 	.align 11
 
 ENTRY(__hyp_stub_vectors)
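Moving the stub vectors into .hyp.text is what makes the kprobes change below cover them: the section is bracketed by linker symbols that the blacklist checks against. A hedged sketch of the relevant declarations, along the lines of arm64's asm/sections.h (exact header and spelling may differ between kernel versions):

/* Linker-provided bounds of the hyp text sections; anything placed in
 * .hyp.text via .pushsection lands inside this range and is therefore
 * caught by arch_within_kprobe_blacklist(). */
extern char __hyp_text_start[], __hyp_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];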

@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * we end up running with module randomization disabled.
 	 */
 	module_alloc_base = (u64)_etext - MODULES_VSIZE;
+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
 
 	/*
 	 * Try to map the FDT early. If this fails, we simply bail,
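The one-liner applies a general early-boot rule on arm64: a variable written while the MMU and caches are on, but read later by code running with caches off, must be cleaned to the Point of Coherency, otherwise the reader can see a stale value straight from DRAM. A minimal sketch of the pattern, using a hypothetical variable (not one from this patch):

#include <linux/types.h>
#include <asm/cacheflush.h>	/* __flush_dcache_area() */

static u64 early_setting;	/* hypothetical example variable */

void record_early_setting(u64 val)
{
	early_setting = val;	/* the store lands in the D-cache */

	/* Clean the line to the Point of Coherency so that code running
	 * with the MMU/caches off reads the updated value from memory. */
	__flush_dcache_area(&early_setting, sizeof(early_setting));
}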

@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
 	    addr < (unsigned long)__entry_text_end) ||
 	    (addr >= (unsigned long)__idmap_text_start &&
 	    addr < (unsigned long)__idmap_text_end) ||
+	    (addr >= (unsigned long)__hyp_text_start &&
+	    addr < (unsigned long)__hyp_text_end) ||
 	    !!search_exception_tables(addr))
 		return true;
 
 	if (!is_kernel_in_hyp_mode()) {
-		if ((addr >= (unsigned long)__hyp_text_start &&
-		    addr < (unsigned long)__hyp_text_end) ||
-		    (addr >= (unsigned long)__hyp_idmap_text_start &&
+		if ((addr >= (unsigned long)__hyp_idmap_text_start &&
 		    addr < (unsigned long)__hyp_idmap_text_end))
 			return true;
 	}
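A quick way to see the blacklist working: register_kprobe() rejects any address for which arch_within_kprobe_blacklist() returns true, failing with -EINVAL. A sketch of a throwaway test module, with the probed symbol chosen purely for illustration (any symbol inside the __hyp_text range would do):

#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe kp = {
	/* illustrative choice: a symbol living in the KVM world-switch code */
	.symbol_name = "__kvm_vcpu_run",
};

static int __init blacklist_demo_init(void)
{
	int ret = register_kprobe(&kp);

	/* With the hunk above applied, expect -EINVAL on both VHE and
	 * non-VHE systems, since the __hyp_text check is now unconditional. */
	pr_info("register_kprobe: %d\n", ret);
	if (!ret)
		unregister_kprobe(&kp);
	return 0;
}

static void __exit blacklist_demo_exit(void)
{
}

module_init(blacklist_demo_init);
module_exit(blacklist_demo_exit);
MODULE_LICENSE("GPL");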

@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len)
 		__clean_dcache_area_pou(kaddr, len);
 		__flush_icache_all();
 	} else {
-		flush_icache_range(addr, addr + len);
+		/*
+		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
+		 * for user mappings.
+		 */
+		__flush_icache_range(addr, addr + len);
 	}
 }
 
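The change works because, on arm64 at the time of this merge, flush_icache_range() is essentially __flush_icache_range() plus an IPI broadcast: the IPI forces every CPU through a context synchronization event so it refetches the new instructions, which is needed when kernel text changes under running CPUs (modules, ftrace) but is redundant for a page being newly mapped into userspace, where the exception return to userspace already synchronizes the context. A hedged paraphrase of the header-level wrapper, not an exact copy:

/* Paraphrase of arm64's flush_icache_range() of this era; check
 * asm/cacheflush.h in your tree for the exact form. */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_icache_range(start, end);	/* D-clean to PoU + I-invalidate */
	kick_all_cpus_sync();			/* IPI: every CPU refetches */
}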