1
0
Fork 0

s390/kaslr: reserve memory for kasan usage

Sometimes the kernel fails to boot with:
"The Linux kernel failed to boot with the KernelAddressSanitizer:
out of memory during initialisation" even with large amounts of memory when
both kaslr and kasan are enabled.

The problem is that kasan initialization code requires 1/8 of physical
memory plus some for page tables. To keep as much code instrumented
as possible kasan avoids using memblock for memory allocations. Instead
kasan uses trivial memory allocator which simply chops off the memory
from the end of online physical memory. For that reason, when kaslr is
enabled together with kasan, avoid positioning the kernel in the upper
memory region that would be utilized during kasan initialization.

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
alistair/sunxi64-5.4-dsi
Vasily Gorbik 2019-08-19 23:19:00 +02:00
parent 9b692102d8
commit 759d4899d9
1 changed file with 33 additions and 8 deletions

View File

@ -3,6 +3,7 @@
* Copyright IBM Corp. 2019
*/
#include <asm/mem_detect.h>
#include <asm/pgtable.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
@ -89,8 +90,10 @@ static unsigned long get_random(unsigned long limit)
unsigned long get_random_base(unsigned long safe_addr)
{
unsigned long memory_limit = memory_end_set ? memory_end : 0;
unsigned long base, start, end, kernel_size;
unsigned long block_sum, offset;
unsigned long kasan_needs;
int i;
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
@ -99,14 +102,36 @@ unsigned long get_random_base(unsigned long safe_addr)
}
safe_addr = ALIGN(safe_addr, THREAD_SIZE);
if ((IS_ENABLED(CONFIG_KASAN))) {
/*
* Estimate kasan memory requirements, which it will reserve
* at the very end of available physical memory. To estimate
* that, we take into account that kasan would require
* 1/8 of available physical memory (for shadow memory) +
* creating page tables for the whole memory + shadow memory
* region (1 + 1/8). To keep page tables estimates simple take
* the double of combined ptes size.
*/
memory_limit = get_mem_detect_end();
if (memory_end_set && memory_limit > memory_end)
memory_limit = memory_end;
/* for shadow memory */
kasan_needs = memory_limit / 8;
/* for paging structures */
kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
_PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
memory_limit -= kasan_needs;
}
kernel_size = vmlinux.image_size + vmlinux.bss_size;
block_sum = 0;
for_each_mem_detect_block(i, &start, &end) {
if (memory_end_set) {
if (start >= memory_end)
if (memory_limit) {
if (start >= memory_limit)
break;
if (end > memory_end)
end = memory_end;
if (end > memory_limit)
end = memory_limit;
}
if (end - start < kernel_size)
continue;
@ -124,11 +149,11 @@ unsigned long get_random_base(unsigned long safe_addr)
base = safe_addr;
block_sum = offset = 0;
for_each_mem_detect_block(i, &start, &end) {
if (memory_end_set) {
if (start >= memory_end)
if (memory_limit) {
if (start >= memory_limit)
break;
if (end > memory_end)
end = memory_end;
if (end > memory_limit)
end = memory_limit;
}
if (end - start < kernel_size)
continue;