Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

 - various hotfixes

 - kexec_file updates and feature work

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (27 commits)
  kernel/kexec_file.c: move purgatories sha256 to common code
  kernel/kexec_file.c: allow archs to set purgatory load address
  kernel/kexec_file.c: remove mis-use of sh_offset field during purgatory load
  kernel/kexec_file.c: remove unneeded variables in kexec_purgatory_setup_sechdrs
  kernel/kexec_file.c: remove unneeded for-loop in kexec_purgatory_setup_sechdrs
  kernel/kexec_file.c: split up __kexec_load_purgatory
  kernel/kexec_file.c: use read-only sections in arch_kexec_apply_relocations*
  kernel/kexec_file.c: search symbols in read-only kexec_purgatory
  kernel/kexec_file.c: make purgatory_info->ehdr const
  kernel/kexec_file.c: remove checks in kexec_purgatory_load
  include/linux/kexec.h: silence compile warnings
  kexec_file, x86: move re-factored code to generic side
  x86: kexec_file: clean up prepare_elf64_headers()
  x86: kexec_file: lift CRASH_MAX_RANGES limit on crash_mem buffer
  x86: kexec_file: remove X86_64 dependency from prepare_elf64_headers()
  x86: kexec_file: purge system-ram walking from prepare_elf64_headers()
  kexec_file,x86,powerpc: factor out kexec_file_ops functions
  kexec_file: make use of purgatory optional
  proc: revalidate misc dentries
  mm, slab: reschedule cache_reap() on the same CPU
  ...
Linus Torvalds, 2018-04-14 08:50:50 -07:00
commit 18b7fd1c93
29 changed files with 684 additions and 657 deletions

@@ -178,6 +178,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
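
The comment added above documents a contract worth showing in use. A minimal sketch, where pin_exactly() is a hypothetical caller (not part of this merge): because __get_user_pages_fast() returns the number of pages pinned rather than a negative errno, a caller needing all-or-nothing semantics must unwind partial pins itself:

	static int pin_exactly(unsigned long start, int nr, struct page **pages)
	{
		int pinned = __get_user_pages_fast(start, nr, 1, pages);

		if (pinned < nr) {
			/* partial success: drop what was pinned, report failure */
			while (pinned-- > 0)
				put_page(pages[pinned]);
			return -EFAULT;
		}
		return 0;	/* all nr pages pinned */
	}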

@@ -552,6 +552,9 @@ config KEXEC_FILE
	  for kernel and initramfs as opposed to a list of segments as is the
	  case for the older kexec call.

+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool KEXEC_FILE
+
 config RELOCATABLE
	bool "Build a relocatable kernel"
	depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))

@@ -95,7 +95,7 @@ static inline bool kdump_in_progress(void)
 }

 #ifdef CONFIG_KEXEC_FILE
-extern struct kexec_file_ops kexec_elf64_ops;
+extern const struct kexec_file_ops kexec_elf64_ops;

 #ifdef CONFIG_IMA_KEXEC
 #define ARCH_HAS_KIMAGE_ARCH

@@ -572,7 +572,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 {
	int ret;
	unsigned int fdt_size;
-	unsigned long kernel_load_addr, purgatory_load_addr;
+	unsigned long kernel_load_addr;
	unsigned long initrd_load_addr = 0, fdt_load_addr;
	void *fdt;
	const void *slave_code;
@@ -580,6 +580,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
	struct elf_info elf_info;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ppc64_rma_size };
+	struct kexec_buf pbuf = { .image = image, .buf_min = 0,
+				  .buf_max = ppc64_rma_size, .top_down = true };

	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
	if (ret)
@@ -591,14 +593,13 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,

	pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);

-	ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true,
-				   &purgatory_load_addr);
+	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed.\n");
		goto out;
	}

-	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);

	if (initrd != NULL) {
		kbuf.buffer = initrd;
@@ -657,7 +658,7 @@ out:
	return ret ? ERR_PTR(ret) : fdt;
 }

-struct kexec_file_ops kexec_elf64_ops = {
+const struct kexec_file_ops kexec_elf64_ops = {
	.probe = elf64_probe,
	.load = elf64_load,
 };

@@ -31,52 +31,19 @@

 #define SLAVE_CODE_SIZE		256

-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
+	NULL
 };

 int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
 {
-	int i, ret = -ENOEXEC;
-	struct kexec_file_ops *fops;
-
	/* We don't support crash kernels yet. */
	if (image->type == KEXEC_TYPE_CRASH)
		return -EOPNOTSUPP;

-	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
-		fops = kexec_file_loaders[i];
-		if (!fops || !fops->probe)
-			continue;
-
-		ret = fops->probe(buf, buf_len);
-		if (!ret) {
-			image->fops = fops;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
-void *arch_kexec_kernel_image_load(struct kimage *image)
-{
-	if (!image->fops || !image->fops->load)
-		return ERR_PTR(-ENOEXEC);
-
-	return image->fops->load(image, image->kernel_buf,
-				 image->kernel_buf_len, image->initrd_buf,
-				 image->initrd_buf_len, image->cmdline_buf,
-				 image->cmdline_buf_len);
-}
-
-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	if (!image->fops || !image->fops->cleanup)
-		return 0;
-
-	return image->fops->cleanup(image->image_loader_data);
+	return kexec_image_probe_default(image, buf, buf_len);
 }

 /**
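
The NULL entry added to kexec_file_loaders[] above is load-bearing: the generic walker this arch code now delegates to (kexec_image_probe_default(), added later in this merge) iterates until it hits a NULL entry or one without ->probe(). Condensed from that function:

	const struct kexec_file_ops * const *fops;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		if (!(*fops)->probe(buf, buf_len)) {
			image->fops = *fops;	/* first loader accepting the image wins */
			break;
		}
	}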

@@ -220,6 +220,8 @@ static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)

@@ -160,6 +160,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 /*
  * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
  * back to the regular GUP.
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
  */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)

@@ -193,6 +193,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
	return 1;
 }

+/*
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ */
 int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
 {

@@ -2008,6 +2008,9 @@ config KEXEC_FILE
	  for kernel and initramfs as opposed to list of segments as
	  accepted by previous system call.

+config ARCH_HAS_KEXEC_PURGATORY
+	def_bool KEXEC_FILE
+
 config KEXEC_VERIFY_SIG
	bool "Verify kernel signature during kexec_file_load() syscall"
	depends on KEXEC_FILE

@@ -2,6 +2,6 @@
 #ifndef _ASM_KEXEC_BZIMAGE64_H
 #define _ASM_KEXEC_BZIMAGE64_H

-extern struct kexec_file_ops kexec_bzImage64_ops;
+extern const struct kexec_file_ops kexec_bzImage64_ops;

 #endif  /* _ASM_KEXE_BZIMAGE64_H */

@@ -38,37 +38,6 @@
 #include <asm/virtext.h>
 #include <asm/intel_pt.h>

-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN   4096
-
-/* This primarily represents number of split ranges due to exclusion */
-#define CRASH_MAX_RANGES	16
-
-struct crash_mem_range {
-	u64 start, end;
-};
-
-struct crash_mem {
-	unsigned int nr_ranges;
-	struct crash_mem_range ranges[CRASH_MAX_RANGES];
-};
-
-/* Misc data about ram ranges needed to prepare elf headers */
-struct crash_elf_data {
-	struct kimage *image;
-	/*
-	 * Total number of ram ranges we have after various adjustments for
-	 * crash reserved region, etc.
-	 */
-	unsigned int max_nr_ranges;
-
-	/* Pointer to elf header */
-	void *ehdr;
-
-	/* Pointer to next phdr */
-	void *bufp;
-	struct crash_mem mem;
-};
-
 /* Used while preparing memory map entries for second kernel */
 struct crash_memmap_data {
	struct boot_params *params;
@@ -218,124 +187,49 @@ static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
	return 0;
 }

 /* Gather all the required information to prepare elf headers for ram regions */
-static void fill_up_crash_elf_data(struct crash_elf_data *ced,
-				   struct kimage *image)
+static struct crash_mem *fill_up_crash_elf_data(void)
 {
	unsigned int nr_ranges = 0;
-
-	ced->image = image;
+	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);
+	if (!nr_ranges)
+		return NULL;

-	ced->max_nr_ranges = nr_ranges;
-
-	/* Exclusion of crash region could split memory ranges */
-	ced->max_nr_ranges++;
-
-	/* If crashk_low_res is not 0, another range split possible */
-	if (crashk_low_res.end)
-		ced->max_nr_ranges++;
-}
+	/*
+	 * Exclusion of crash region and/or crashk_low_res may cause
+	 * another range split. So add extra two slots here.
+	 */
+	nr_ranges += 2;
+	cmem = vzalloc(sizeof(struct crash_mem) +
+			sizeof(struct crash_mem_range) * nr_ranges);
+	if (!cmem)
+		return NULL;

-static int exclude_mem_range(struct crash_mem *mem,
-		unsigned long long mstart, unsigned long long mend)
-{
-	int i, j;
-	unsigned long long start, end;
-	struct crash_mem_range temp_range = {0, 0};
+	cmem->max_nr_ranges = nr_ranges;
+	cmem->nr_ranges = 0;

-	for (i = 0; i < mem->nr_ranges; i++) {
-		start = mem->ranges[i].start;
-		end = mem->ranges[i].end;
-
-		if (mstart > end || mend < start)
-			continue;
-
-		/* Truncate any area outside of range */
-		if (mstart < start)
-			mstart = start;
-		if (mend > end)
-			mend = end;
-
-		/* Found completely overlapping range */
-		if (mstart == start && mend == end) {
-			mem->ranges[i].start = 0;
-			mem->ranges[i].end = 0;
-			if (i < mem->nr_ranges - 1) {
-				/* Shift rest of the ranges to left */
-				for (j = i; j < mem->nr_ranges - 1; j++) {
-					mem->ranges[j].start =
-						mem->ranges[j+1].start;
-					mem->ranges[j].end =
-							mem->ranges[j+1].end;
-				}
-			}
-			mem->nr_ranges--;
-			return 0;
-		}
-
-		if (mstart > start && mend < end) {
-			/* Split original range */
-			mem->ranges[i].end = mstart - 1;
-			temp_range.start = mend + 1;
-			temp_range.end = end;
-		} else if (mstart != start)
-			mem->ranges[i].end = mstart - 1;
-		else
-			mem->ranges[i].start = mend + 1;
-		break;
-	}
-
-	/* If a split happend, add the split to array */
-	if (!temp_range.end)
-		return 0;
-
-	/* Split happened */
-	if (i == CRASH_MAX_RANGES - 1) {
-		pr_err("Too many crash ranges after split\n");
-		return -ENOMEM;
-	}
-
-	/* Location where new range should go */
-	j = i + 1;
-	if (j < mem->nr_ranges) {
-		/* Move over all ranges one slot towards the end */
-		for (i = mem->nr_ranges - 1; i >= j; i--)
-			mem->ranges[i + 1] = mem->ranges[i];
-	}
-
-	mem->ranges[j].start = temp_range.start;
-	mem->ranges[j].end = temp_range.end;
-	mem->nr_ranges++;
-	return 0;
+	return cmem;
 }

 /*
  * Look for any unwanted ranges between mstart, mend and remove them. This
- * might lead to split and split ranges are put in ced->mem.ranges[] array
+ * might lead to split and split ranges are put in cmem->ranges[] array
  */
-static int elf_header_exclude_ranges(struct crash_elf_data *ced,
-		unsigned long long mstart, unsigned long long mend)
+static int elf_header_exclude_ranges(struct crash_mem *cmem)
 {
-	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

-	memset(cmem->ranges, 0, sizeof(cmem->ranges));
-
-	cmem->ranges[0].start = mstart;
-	cmem->ranges[0].end = mend;
-	cmem->nr_ranges = 1;
-
	/* Exclude crashkernel region */
-	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
-		ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
+					      crashk_low_res.end);
		if (ret)
			return ret;
	}
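
To make the "two extra slots" concrete, a worked example of crash_exclude_mem_range() semantics (addresses illustrative): excluding [0x3000, 0x5fff] from a single range [0x0000, 0xffff] splits it into two entries,

	before:	ranges[0] = { 0x0000, 0xffff }		nr_ranges = 1
	after:	ranges[0] = { 0x0000, 0x2fff }
		ranges[1] = { 0x6000, 0xffff }		nr_ranges = 2

so each exclusion (crashkernel and, if set, crashk_low_res) can grow the array by at most one range, which is exactly what the "nr_ranges += 2" above accounts for.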
@@ -345,144 +239,12 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,

 static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
 {
-	struct crash_elf_data *ced = arg;
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long mstart, mend;
-	struct kimage *image = ced->image;
-	struct crash_mem *cmem;
-	int ret, i;
+	struct crash_mem *cmem = arg;

-	ehdr = ced->ehdr;
+	cmem->ranges[cmem->nr_ranges].start = res->start;
+	cmem->ranges[cmem->nr_ranges].end = res->end;
+	cmem->nr_ranges++;

-	/* Exclude unwanted mem ranges */
-	ret = elf_header_exclude_ranges(ced, res->start, res->end);
-	if (ret)
-		return ret;
-
-	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
-	cmem = &ced->mem;
-
-	for (i = 0; i < cmem->nr_ranges; i++) {
-		mstart = cmem->ranges[i].start;
-		mend = cmem->ranges[i].end;
-
-		phdr = ced->bufp;
-		ced->bufp += sizeof(Elf64_Phdr);
-
-		phdr->p_type = PT_LOAD;
-		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_offset = mstart;
-
-		/*
-		 * If a range matches backup region, adjust offset to backup
-		 * segment.
-		 */
-		if (mstart == image->arch.backup_src_start &&
-		    (mend - mstart + 1) == image->arch.backup_src_sz)
-			phdr->p_offset = image->arch.backup_load_addr;
-
-		phdr->p_paddr = mstart;
-		phdr->p_vaddr = (unsigned long long) __va(mstart);
-		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
-		phdr->p_align = 0;
-		ehdr->e_phnum++;
-		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
-			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
-			ehdr->e_phnum, phdr->p_offset);
-	}
-
-	return ret;
-}
-
-static int prepare_elf64_headers(struct crash_elf_data *ced,
-		void **addr, unsigned long *sz)
-{
-	Elf64_Ehdr *ehdr;
-	Elf64_Phdr *phdr;
-	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
-	unsigned char *buf, *bufp;
-	unsigned int cpu;
-	unsigned long long notes_addr;
-	int ret;
-
-	/* extra phdr for vmcoreinfo elf note */
-	nr_phdr = nr_cpus + 1;
-	nr_phdr += ced->max_nr_ranges;
-
-	/*
-	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
-	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
-	 * I think this is required by tools like gdb. So same physical
-	 * memory will be mapped in two elf headers. One will contain kernel
-	 * text virtual addresses and other will have __va(physical) addresses.
-	 */
-	nr_phdr++;
-	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
-	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
-
-	buf = vzalloc(elf_sz);
-	if (!buf)
-		return -ENOMEM;
-
-	bufp = buf;
-	ehdr = (Elf64_Ehdr *)bufp;
-	bufp += sizeof(Elf64_Ehdr);
-	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
-	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
-	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
-	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
-	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
-	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
-	ehdr->e_type = ET_CORE;
-	ehdr->e_machine = ELF_ARCH;
-	ehdr->e_version = EV_CURRENT;
-	ehdr->e_phoff = sizeof(Elf64_Ehdr);
-	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
-	ehdr->e_phentsize = sizeof(Elf64_Phdr);
-
-	/* Prepare one phdr of type PT_NOTE for each present cpu */
-	for_each_present_cpu(cpu) {
-		phdr = (Elf64_Phdr *)bufp;
-		bufp += sizeof(Elf64_Phdr);
-		phdr->p_type = PT_NOTE;
-		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
-		phdr->p_offset = phdr->p_paddr = notes_addr;
-		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
-		(ehdr->e_phnum)++;
-	}
-
-	/* Prepare one PT_NOTE header for vmcoreinfo */
-	phdr = (Elf64_Phdr *)bufp;
-	bufp += sizeof(Elf64_Phdr);
-	phdr->p_type = PT_NOTE;
-	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
-	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
-	(ehdr->e_phnum)++;
-
-#ifdef CONFIG_X86_64
-	/* Prepare PT_LOAD type program header for kernel text region */
-	phdr = (Elf64_Phdr *)bufp;
-	bufp += sizeof(Elf64_Phdr);
-	phdr->p_type = PT_LOAD;
-	phdr->p_flags = PF_R|PF_W|PF_X;
-	phdr->p_vaddr = (Elf64_Addr)_text;
-	phdr->p_filesz = phdr->p_memsz = _end - _text;
-	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
-	(ehdr->e_phnum)++;
-#endif
-
-	/* Prepare PT_LOAD headers for system ram chunks. */
-	ced->ehdr = ehdr;
-	ced->bufp = bufp;
-	ret = walk_system_ram_res(0, -1, ced,
-			prepare_elf64_ram_headers_callback);
-	if (ret < 0)
-		return ret;
-
-	*addr = buf;
-	*sz = elf_sz;
	return 0;
 }
@@ -490,18 +252,46 @@ static int prepare_elf64_headers(struct crash_elf_data *ced,
 static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
 {
-	struct crash_elf_data *ced;
-	int ret;
+	struct crash_mem *cmem;
+	Elf64_Ehdr *ehdr;
+	Elf64_Phdr *phdr;
+	int ret, i;

-	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
-	if (!ced)
+	cmem = fill_up_crash_elf_data();
+	if (!cmem)
		return -ENOMEM;

-	fill_up_crash_elf_data(ced, image);
+	ret = walk_system_ram_res(0, -1, cmem,
+				  prepare_elf64_ram_headers_callback);
+	if (ret)
+		goto out;
+
+	/* Exclude unwanted mem ranges */
+	ret = elf_header_exclude_ranges(cmem);
+	if (ret)
+		goto out;

	/* By default prepare 64bit headers */
-	ret = prepare_elf64_headers(ced, addr, sz);
-	kfree(ced);
+	ret = crash_prepare_elf64_headers(cmem,
+				IS_ENABLED(CONFIG_X86_64), addr, sz);
+	if (ret)
+		goto out;
+
+	/*
+	 * If a range matches backup region, adjust offset to backup
+	 * segment.
+	 */
+	ehdr = (Elf64_Ehdr *)*addr;
+	phdr = (Elf64_Phdr *)(ehdr + 1);
+	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
+		if (phdr->p_type == PT_LOAD &&
+				phdr->p_paddr == image->arch.backup_src_start &&
+				phdr->p_memsz == image->arch.backup_src_sz) {
+			phdr->p_offset = image->arch.backup_load_addr;
+			break;
+		}
+
+out:
+	vfree(cmem);
	return ret;
 }
@@ -547,14 +337,14 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
-	ret = exclude_mem_range(cmem, start, end);
+	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
-	return exclude_mem_range(cmem, start, end);
+	return crash_exclude_mem_range(cmem, start, end);
 }

 /* Prepare memory map for crash dump kernel */

@@ -334,7 +334,6 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	unsigned long setup_header_size, params_cmdline_sz;
	struct boot_params *params;
	unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
-	unsigned long purgatory_load_addr;
	struct bzimage64_data *ldata;
	struct kexec_entry64_regs regs64;
	void *stack;
@@ -342,6 +341,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
	struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX,
				  .top_down = true };
+	struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR,
+				  .buf_max = ULONG_MAX, .top_down = true };

	header = (struct setup_header *)(kernel + setup_hdr_offset);
	setup_sects = header->setup_sects;
@@ -379,14 +380,13 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
	 * Load purgatory. For 64bit entry point, purgatory code can be
	 * anywhere.
	 */
-	ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1,
-				   &purgatory_load_addr);
+	ret = kexec_load_purgatory(image, &pbuf);
	if (ret) {
		pr_err("Loading purgatory failed\n");
		return ERR_PTR(ret);
	}

-	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+	pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);

	/*
@@ -538,7 +538,7 @@ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
 }
 #endif

-struct kexec_file_ops kexec_bzImage64_ops = {
+const struct kexec_file_ops kexec_bzImage64_ops = {
	.probe = bzImage64_probe,
	.load = bzImage64_load,
	.cleanup = bzImage64_cleanup,
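
Both callers converted in this merge follow the same pattern, worth stating once: placement constraints now travel in a struct kexec_buf, and the chosen load address comes back in kbuf.mem, replacing the old min/max/top_down/*load_addr parameter list. A condensed sketch of the new convention:

	struct kexec_buf pbuf = {
		.image    = image,
		.buf_min  = MIN_PURGATORY_ADDR,
		.buf_max  = ULONG_MAX,
		.top_down = true,
	};

	ret = kexec_load_purgatory(image, &pbuf);
	if (!ret)
		pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);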

@@ -30,8 +30,9 @@
 #include <asm/set_memory.h>

 #ifdef CONFIG_KEXEC_FILE
-static struct kexec_file_ops *kexec_file_loaders[] = {
+const struct kexec_file_ops * const kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
+		NULL
 };
 #endif

@@ -364,27 +365,6 @@ void arch_crash_save_vmcoreinfo(void)
 /* arch-dependent functionality related to kexec file-based syscall */

 #ifdef CONFIG_KEXEC_FILE
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-				  unsigned long buf_len)
-{
-	int i, ret = -ENOEXEC;
-	struct kexec_file_ops *fops;
-
-	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
-		fops = kexec_file_loaders[i];
-		if (!fops || !fops->probe)
-			continue;
-
-		ret = fops->probe(buf, buf_len);
-		if (!ret) {
-			image->fops = fops;
-			return ret;
-		}
-	}
-
-	return ret;
-}
-
 void *arch_kexec_kernel_image_load(struct kimage *image)
 {
	vfree(image->arch.elf_headers);
@@ -399,88 +379,53 @@ void *arch_kexec_kernel_image_load(struct kimage *image)
				 image->cmdline_buf_len);
 }

-int arch_kimage_file_post_load_cleanup(struct kimage *image)
-{
-	if (!image->fops || !image->fops->cleanup)
-		return 0;
-
-	return image->fops->cleanup(image->image_loader_data);
-}
-
-#ifdef CONFIG_KEXEC_VERIFY_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
-				 unsigned long kernel_len)
-{
-	if (!image->fops || !image->fops->verify_sig) {
-		pr_debug("kernel loader does not support signature verification.");
-		return -EKEYREJECTED;
-	}
-
-	return image->fops->verify_sig(kernel, kernel_len);
-}
-#endif
-
 /*
  * Apply purgatory relocations.
  *
- * ehdr: Pointer to elf headers
- * sechdrs: Pointer to section headers.
- * relsec: section index of SHT_RELA section.
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELAs.
+ * @symtabsec:	Corresponding symtab.
  *
  * TODO: Some of the code belongs to generic code. Move that in kexec.c.
  */
-int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
-				     Elf64_Shdr *sechdrs, unsigned int relsec)
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+				     Elf_Shdr *section, const Elf_Shdr *relsec,
+				     const Elf_Shdr *symtabsec)
 {
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
-	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;
+	const Elf_Shdr *sechdrs;

-	/*
-	 * ->sh_offset has been modified to keep the pointer to section
-	 * contents in memory
-	 */
-	rel = (void *)sechdrs[relsec].sh_offset;
-
-	/* Section to which relocations apply */
-	section = &sechdrs[sechdrs[relsec].sh_info];
-
-	pr_debug("Applying relocate section %u to %u\n", relsec,
-		 sechdrs[relsec].sh_info);
-
-	/* Associated symbol table */
-	symtabsec = &sechdrs[sechdrs[relsec].sh_link];
-
-	/* String table */
-	if (symtabsec->sh_link >= ehdr->e_shnum) {
-		/* Invalid strtab section number */
-		pr_err("Invalid string table section index %d\n",
-		       symtabsec->sh_link);
-		return -ENOEXEC;
-	}
-
-	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;
-
-	/* section header string table */
-	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;
+	/* String & section header string table */
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+	strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset;
+	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;

-	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+	rel = (void *)pi->ehdr + relsec->sh_offset;
+
+	pr_debug("Applying relocate section %s to %u\n",
+		 shstrtab + relsec->sh_name, relsec->sh_info);
+
+	for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains byte offset from beginning
		 * of section to the storage unit affected.
		 *
-		 * This is location to update (->sh_offset). This is temporary
-		 * buffer where section is currently loaded. This will finally
-		 * be loaded to a different address later, pointed to by
+		 * This is location to update. This is temporary buffer
+		 * where section is currently loaded. This will finally be
+		 * loaded to a different address later, pointed to by
		 * ->sh_addr. kexec takes care of moving it
		 *  (kexec_load_segment()).
		 */
-		location = (void *)(section->sh_offset + rel[i].r_offset);
+		location = pi->purgatory_buf;
+		location += section->sh_offset;
+		location += rel[i].r_offset;

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;
@@ -491,8 +436,8 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
		 * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get
		 * these respectively.
		 */
-		sym = (Elf64_Sym *)symtabsec->sh_offset +
-				ELF64_R_SYM(rel[i].r_info);
+		sym = (void *)pi->ehdr + symtabsec->sh_offset;
+		sym += ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
@@ -515,12 +460,12 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
-		else if (sym->st_shndx >= ehdr->e_shnum) {
+		else if (sym->st_shndx >= pi->ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
-			sec_base = sechdrs[sym->st_shndx].sh_addr;
+			sec_base = pi->sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;

@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))

+$(obj)/sha256.o: $(srctree)/lib/sha256.c
+	$(call if_changed_rule,cc_o_c)
+
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
 targets += purgatory.ro

@@ -11,9 +11,9 @@
  */

 #include <linux/bug.h>
+#include <linux/sha256.h>
 #include <asm/purgatory.h>

-#include "sha256.h"
 #include "../boot/string.h"

 unsigned long purgatory_backup_dest __section(.kexec-purgatory);

@@ -10,4 +10,16 @@
  * Version 2.  See the file COPYING for more details.
  */

+#include <linux/types.h>
+
 #include "../boot/string.c"
+
+void *memcpy(void *dst, const void *src, size_t len)
+{
+	return __builtin_memcpy(dst, src, len);
+}
+
+void *memset(void *dst, int c, size_t len)
+{
+	return __builtin_memset(dst, c, len);
+}
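
The explicit memcpy()/memset() definitions matter because a compiler is free to emit calls to them even when the source never spells one out, so a freestanding object like the purgatory must provide both. Illustrative only (object names hypothetical):

	struct boot_params dst, src;	/* hypothetical large objects */

	dst = src;	/* may be lowered to memcpy(&dst, &src, sizeof(dst)) */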

@@ -15,6 +15,7 @@
 #include <linux/stat.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/printk.h>
 #include <linux/mount.h>
@@ -217,6 +218,26 @@ void proc_free_inum(unsigned int inum)
	ida_simple_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
 }

+static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	if (atomic_read(&PDE(d_inode(dentry))->in_use) < 0)
+		return 0; /* revalidate */
+	return 1;
+}
+
+static int proc_misc_d_delete(const struct dentry *dentry)
+{
+	return atomic_read(&PDE(d_inode(dentry))->in_use) < 0;
+}
+
+static const struct dentry_operations proc_misc_dentry_ops = {
+	.d_revalidate	= proc_misc_d_revalidate,
+	.d_delete	= proc_misc_d_delete,
+};
+
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
@@ -234,7 +255,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
		inode = proc_get_inode(dir->i_sb, de);
		if (!inode)
			return ERR_PTR(-ENOMEM);
-		d_set_d_op(dentry, &simple_dentry_operations);
+		d_set_d_op(dentry, &proc_misc_dentry_ops);
		d_add(dentry, inode);
		return NULL;
	}
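
A sketch of the stale-dentry case this fixes (entry and fops names hypothetical): after remove_proc_entry() the entry's in_use count goes negative, so the new proc_misc_d_revalidate() rejects a dentry cached from an earlier lookup, and the next lookup sees the recreated entry (or -ENOENT) instead of stale state:

	proc_create("foo", 0444, NULL, &foo_fops);	/* dentry enters the dcache */
	remove_proc_entry("foo", NULL);			/* in_use drops below zero */
	proc_create("foo", 0444, NULL, &bar_fops);	/* next lookup revalidates */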

@@ -99,21 +99,25 @@ struct compat_kexec_segment {

 #ifdef CONFIG_KEXEC_FILE
 struct purgatory_info {
-	/* Pointer to elf header of read only purgatory */
-	Elf_Ehdr *ehdr;
-
-	/* Pointer to purgatory sechdrs which are modifiable */
+	/*
+	 * Pointer to elf header at the beginning of kexec_purgatory.
+	 * Note: kexec_purgatory is read only
+	 */
+	const Elf_Ehdr *ehdr;
+
+	/*
+	 * Temporary, modifiable buffer for sechdrs used for relocation.
+	 * This memory can be freed post image load.
+	 */
	Elf_Shdr *sechdrs;
+
	/*
-	 * Temporary buffer location where purgatory is loaded and relocated
-	 * This memory can be freed post image load
+	 * Temporary, modifiable buffer for stripped purgatory used for
+	 * relocation. This memory can be freed post image load.
	 */
	void *purgatory_buf;
-
-	/* Address where purgatory is finally loaded and is executed from */
-	unsigned long purgatory_load_addr;
 };

+struct kimage;
+
 typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
 typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
			     unsigned long kernel_len, char *initrd,
@@ -135,6 +139,11 @@ struct kexec_file_ops {
 #endif
 };

+extern const struct kexec_file_ops * const kexec_file_loaders[];
+
+int kexec_image_probe_default(struct kimage *image, void *buf,
+			      unsigned long buf_len);
+
 /**
  * struct kexec_buf - parameters for finding a place for a buffer in memory
  * @image:	kexec image in which memory to search.
@@ -159,10 +168,44 @@ struct kexec_buf {
	bool top_down;
 };

+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
+int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
+				   void *buf, unsigned int size,
+				   bool get_value);
+void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+
+int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+					    Elf_Shdr *section,
+					    const Elf_Shdr *relsec,
+					    const Elf_Shdr *symtab);
+int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
+					Elf_Shdr *section,
+					const Elf_Shdr *relsec,
+					const Elf_Shdr *symtab);
+
 int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *));
 extern int kexec_add_buffer(struct kexec_buf *kbuf);
 int kexec_locate_mem_hole(struct kexec_buf *kbuf);

+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN   4096
+
+struct crash_mem_range {
+	u64 start, end;
+};
+
+struct crash_mem {
+	unsigned int max_nr_ranges;
+	unsigned int nr_ranges;
+	struct crash_mem_range ranges[0];
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+				   unsigned long long mstart,
+				   unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
+				       void **addr, unsigned long *sz);
 #endif /* CONFIG_KEXEC_FILE */

 struct kimage {
@@ -209,7 +252,7 @@ struct kimage {
	unsigned long cmdline_buf_len;

	/* File operations provided by image loader */
-	struct kexec_file_ops *fops;
+	const struct kexec_file_ops *fops;

	/* Image loader handling the kernel can store a pointer here */
	void *image_loader_data;
@@ -226,14 +269,6 @@ extern void machine_kexec_cleanup(struct kimage *image);
 extern int kernel_kexec(void);
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
						unsigned int order);
-extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
-				unsigned long max, int top_down,
-				unsigned long *load_addr);
-extern int kexec_purgatory_get_set_symbol(struct kimage *image,
-					  const char *name, void *buf,
-					  unsigned int size, bool get_value);
-extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
-					     const char *name);
 extern void __crash_kexec(struct pt_regs *);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
@@ -273,16 +308,6 @@ int crash_shrink_memory(unsigned long new_size);
 size_t crash_get_memory_size(void);
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);

-int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
-					 unsigned long buf_len);
-void * __weak arch_kexec_kernel_image_load(struct kimage *image);
-int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
-int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
-					unsigned long buf_len);
-int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
-					    Elf_Shdr *sechdrs, unsigned int relsec);
-int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-					unsigned int relsec);
-
 void arch_kexec_protect_crashkres(void);
 void arch_kexec_unprotect_crashkres(void);
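
The purgatory symbol accessors relocated above keep their existing semantics; for reference, the x86 bzImage64 loader pushes register state into the purgatory like this (condensed from existing code in arch/x86/kernel/kexec-bzimage64.c; regs64 is filled in by the loader beforehand):

	struct kexec_entry64_regs regs64;

	ret = kexec_purgatory_get_set_symbol(image, "entry64_regs", &regs64,
					     sizeof(regs64), 0);	/* get_value == 0: set */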

@@ -13,6 +13,15 @@
 #include <linux/types.h>
 #include <crypto/sha.h>

+/*
+ * Stand-alone implementation of the SHA256 algorithm. It is designed to
+ * have as little dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should use the implementation in
+ * crypto/.
+ *
+ * For details see lib/sha256.c
+ */
+
 extern int sha256_init(struct sha256_state *sctx);
 extern int sha256_update(struct sha256_state *sctx, const u8 *input,
			 unsigned int length);
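
Minimal usage sketch of the stand-alone API (data/len are hypothetical); unlike the crypto/ implementation there is no transform allocation, which is what makes it usable from the purgatory:

	struct sha256_state sctx;
	u8 digest[SHA256_DIGEST_SIZE];

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, digest);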

@@ -225,6 +225,12 @@ static int __shm_open(struct vm_area_struct *vma)
	if (IS_ERR(shp))
		return PTR_ERR(shp);

+	if (shp->shm_file != sfd->file) {
+		/* ID was reused */
+		shm_unlock(shp);
+		return -EINVAL;
+	}
+
	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
@@ -455,8 +461,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
	int ret;

	/*
-	 * In case of remap_file_pages() emulation, the file can represent
-	 * removed IPC ID: propogate shm_lock() error to caller.
+	 * In case of remap_file_pages() emulation, the file can represent an
+	 * IPC ID that was removed, and possibly even reused by another shm
+	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
@@ -480,6 +487,7 @@ static int shm_release(struct inode *ino, struct file *file)
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
+	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
@@ -1445,7 +1453,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
-	sfd->file = shp->shm_file;
+	/*
+	 * We need to take a reference to the real shm file to prevent the
+	 * pointer from becoming stale in cases where the lifetime of the outer
+	 * file extends beyond that of the shm segment. It's not usually
+	 * possible, but it can happen during remap_file_pages() emulation as
+	 * that unmaps the memory, then does ->mmap() via file reference only.
+	 * We'll deny the ->mmap() if the shm segment was since removed, but to
+	 * detect shm ID reuse we need to compare the file pointers.
+	 */
+	sfd->file = get_file(shp->shm_file);
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
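
A userspace sketch of the window being closed (illustrative; the exact errno depends on timing): remap_file_pages() emulation unmaps and then re-enters ->mmap() through the kept file pointer alone, which used to race with segment removal and ID reuse:

	#define _GNU_SOURCE
	#include <sys/shm.h>
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
		char *p = shmat(id, NULL, 0);

		shmctl(id, IPC_RMID, NULL);		/* segment marked for removal */
		if (remap_file_pages(p, 4096, 0, 1, 0))	/* now cleanly refused */
			perror("remap_file_pages");
		return 0;
	}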

@@ -454,6 +454,7 @@ static int __init crash_save_vmcoreinfo_init(void)
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
+	VMCOREINFO_NUMBER(PG_swapbacked);
	VMCOREINFO_NUMBER(PG_slab);
 #ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);

@@ -22,50 +22,123 @@
 #include <linux/ima.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/vmalloc.h>
 #include "kexec_internal.h"

 static int kexec_calculate_store_digests(struct kimage *image);

+/*
+ * Currently this is the only default function that is exported as some
+ * architectures need it to do additional handlings.
+ * In the future, other default functions may be exported too if required.
+ */
+int kexec_image_probe_default(struct kimage *image, void *buf,
+			      unsigned long buf_len)
+{
+	const struct kexec_file_ops * const *fops;
+	int ret = -ENOEXEC;
+
+	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
+		ret = (*fops)->probe(buf, buf_len);
+		if (!ret) {
+			image->fops = *fops;
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 /* Architectures can provide this probe function */
 int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
 {
-	return -ENOEXEC;
+	return kexec_image_probe_default(image, buf, buf_len);
+}
+
+static void *kexec_image_load_default(struct kimage *image)
+{
+	if (!image->fops || !image->fops->load)
+		return ERR_PTR(-ENOEXEC);
+
+	return image->fops->load(image, image->kernel_buf,
+				 image->kernel_buf_len, image->initrd_buf,
+				 image->initrd_buf_len, image->cmdline_buf,
+				 image->cmdline_buf_len);
 }

 void * __weak arch_kexec_kernel_image_load(struct kimage *image)
 {
-	return ERR_PTR(-ENOEXEC);
+	return kexec_image_load_default(image);
+}
+
+static int kexec_image_post_load_cleanup_default(struct kimage *image)
+{
+	if (!image->fops || !image->fops->cleanup)
+		return 0;
+
+	return image->fops->cleanup(image->image_loader_data);
 }

 int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
 {
-	return -EINVAL;
+	return kexec_image_post_load_cleanup_default(image);
 }

 #ifdef CONFIG_KEXEC_VERIFY_SIG
+static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
+					  unsigned long buf_len)
+{
+	if (!image->fops || !image->fops->verify_sig) {
+		pr_debug("kernel loader does not support signature verification.\n");
+		return -EKEYREJECTED;
+	}
+
+	return image->fops->verify_sig(buf, buf_len);
+}
+
 int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
 {
-	return -EKEYREJECTED;
+	return kexec_image_verify_sig_default(image, buf, buf_len);
 }
 #endif

-/* Apply relocations of type RELA */
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELAs.
+ * @symtab:	Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
 int __weak
-arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-				 unsigned int relsec)
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
 {
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
 }

-/* Apply relocations of type REL */
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi:		Purgatory to be relocated.
+ * @section:	Section relocations applying to.
+ * @relsec:	Section containing RELs.
+ * @symtab:	Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
 int __weak
-arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
-			     unsigned int relsec)
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
 {
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
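
With the defaults above, the weak hooks compose cleanly with arch overrides. A sketch of an override that keeps the generic behavior, mirroring the powerpc change earlier in this merge:

	int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					  unsigned long buf_len)
	{
		/* arch-specific policy first ... */
		if (image->type == KEXEC_TYPE_CRASH)
			return -EOPNOTSUPP;

		/* ... then fall back to the common loader walk */
		return kexec_image_probe_default(image, buf, buf_len);
	}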
@@ -532,6 +605,9 @@ static int kexec_calculate_store_digests(struct kimage *image)
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

+	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
+		return 0;
+
	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;
@@ -633,87 +709,29 @@ out:
	return ret;
 }

-/* Actually load purgatory. Lot of code taken from kexec-tools */
-static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
-				  unsigned long max, int top_down)
+#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
+/*
+ * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
+ * @pi:		Purgatory to be loaded.
+ * @kbuf:	Buffer to setup.
+ *
+ * Allocates the memory needed for the buffer. Caller is responsible to free
+ * the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
+				      struct kexec_buf *kbuf)
 {
-	struct purgatory_info *pi = &image->purgatory_info;
-	unsigned long align, bss_align, bss_sz, bss_pad;
-	unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
-	unsigned char *buf_addr, *src;
-	int i, ret = 0, entry_sidx = -1;
-	const Elf_Shdr *sechdrs_c;
-	Elf_Shdr *sechdrs = NULL;
-	struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
-				  .buf_min = min, .buf_max = max,
-				  .top_down = top_down };
+	const Elf_Shdr *sechdrs;
+	unsigned long bss_align;
+	unsigned long bss_sz;
+	unsigned long align;
+	int i, ret;

-	/*
-	 * sechdrs_c points to section headers in purgatory and are read
-	 * only. No modifications allowed.
-	 */
-	sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff;
-
-	/*
-	 * We can not modify sechdrs_c[] and its fields. It is read only.
-	 * Copy it over to a local copy where one can store some temporary
-	 * data and free it at the end. We need to modify ->sh_addr and
-	 * ->sh_offset fields to keep track of permanent and temporary
-	 * locations of sections.
-	 */
-	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
-	if (!sechdrs)
-		return -ENOMEM;
-	memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr));
-
-	/*
-	 * We seem to have multiple copies of sections. First copy is which
-	 * is embedded in kernel in read only section. Some of these sections
-	 * will be copied to a temporary buffer and relocated. And these
-	 * sections will finally be copied to their final destination at
-	 * segment load time.
-	 *
-	 * Use ->sh_offset to reflect section address in memory. It will
-	 * point to original read only copy if section is not allocatable.
-	 * Otherwise it will point to temporary copy which will be relocated.
-	 *
-	 * Use ->sh_addr to contain final address of the section where it
-	 * will go during execution time.
-	 */
-	for (i = 0; i < pi->ehdr->e_shnum; i++) {
-		if (sechdrs[i].sh_type == SHT_NOBITS)
-			continue;
-
-		sechdrs[i].sh_offset = (unsigned long)pi->ehdr +
-						sechdrs[i].sh_offset;
-	}
-
-	/*
-	 * Identify entry point section and make entry relative to section
-	 * start.
-	 */
-	entry = pi->ehdr->e_entry;
-	for (i = 0; i < pi->ehdr->e_shnum; i++) {
-		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
-			continue;
-
-		if (!(sechdrs[i].sh_flags & SHF_EXECINSTR))
-			continue;
-
-		/* Make entry section relative */
-		if (sechdrs[i].sh_addr <= pi->ehdr->e_entry &&
-		    ((sechdrs[i].sh_addr + sechdrs[i].sh_size) >
-		     pi->ehdr->e_entry)) {
-			entry_sidx = i;
-			entry -= sechdrs[i].sh_addr;
-			break;
-		}
-	}
-
-	/* Determine how much memory is needed to load relocatable object. */
-	bss_align = 1;
-	bss_sz = 0;
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+	kbuf->buf_align = bss_align = 1;
+	kbuf->bufsz = bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -721,111 +739,124 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
-			if (kbuf.buf_align < align)
-				kbuf.buf_align = align;
-			kbuf.bufsz = ALIGN(kbuf.bufsz, align);
-			kbuf.bufsz += sechdrs[i].sh_size;
+			if (kbuf->buf_align < align)
+				kbuf->buf_align = align;
+			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
+			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
-			/* bss section */
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
+	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
+	kbuf->memsz = kbuf->bufsz + bss_sz;
+	if (kbuf->buf_align < bss_align)
+		kbuf->buf_align = bss_align;

-	/* Determine the bss padding required to align bss properly */
-	bss_pad = 0;
-	if (kbuf.bufsz & (bss_align - 1))
-		bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));
+	kbuf->buffer = vzalloc(kbuf->bufsz);
+	if (!kbuf->buffer)
+		return -ENOMEM;
+	pi->purgatory_buf = kbuf->buffer;

-	kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;
-
-	/* Allocate buffer for purgatory */
-	kbuf.buffer = vzalloc(kbuf.bufsz);
-	if (!kbuf.buffer) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	if (kbuf.buf_align < bss_align)
-		kbuf.buf_align = bss_align;
-
-	/* Add buffer to segment list */
-	ret = kexec_add_buffer(&kbuf);
+	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;
-	pi->purgatory_load_addr = kbuf.mem;

-	/* Load SHF_ALLOC sections */
-	buf_addr = kbuf.buffer;
-	load_addr = curr_load_addr = pi->purgatory_load_addr;
-	bss_addr = load_addr + kbuf.bufsz + bss_pad;
+	return 0;
+out:
+	vfree(pi->purgatory_buf);
+	pi->purgatory_buf = NULL;
+	return ret;
+}
+
+/*
+ * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
+ * @pi:		Purgatory to be loaded.
+ * @kbuf:	Buffer prepared to store purgatory.
+ *
+ * Allocates the memory needed for the buffer. Caller is responsible to free
+ * the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
+					 struct kexec_buf *kbuf)
+{
+	unsigned long bss_addr;
+	unsigned long offset;
+	Elf_Shdr *sechdrs;
+	int i;
+
+	/*
+	 * The section headers in kexec_purgatory are read-only. In order to
+	 * have them modifiable make a temporary copy.
+	 */
+	sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+	if (!sechdrs)
+		return -ENOMEM;
+	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
+	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
+	pi->sechdrs = sechdrs;
+
+	offset = 0;
+	bss_addr = kbuf->mem + kbuf->bufsz;
+	kbuf->image->start = pi->ehdr->e_entry;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
+		unsigned long align;
+		void *src, *dst;
+
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
-		if (sechdrs[i].sh_type != SHT_NOBITS) {
-			curr_load_addr = ALIGN(curr_load_addr, align);
-			offset = curr_load_addr - load_addr;
-			/* We already modifed ->sh_offset to keep src addr */
-			src = (char *) sechdrs[i].sh_offset;
-			memcpy(buf_addr + offset, src, sechdrs[i].sh_size);
-
-			/* Store load address and source address of section */
-			sechdrs[i].sh_addr = curr_load_addr;
-
-			/*
-			 * This section got copied to temporary buffer. Update
-			 * ->sh_offset accordingly.
-			 */
-			sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset);
-
-			/* Advance to the next address */
-			curr_load_addr += sechdrs[i].sh_size;
-		} else {
+		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
-		}
-	}
+			continue;
+		}

-	/* Update entry point based on load address of text section */
-	if (entry_sidx >= 0)
-		entry += sechdrs[entry_sidx].sh_addr;
+		offset = ALIGN(offset, align);
+		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
+		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
+		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
+					 + sechdrs[i].sh_size)) {
+			kbuf->image->start -= sechdrs[i].sh_addr;
+			kbuf->image->start += kbuf->mem + offset;
+		}

-	/* Make kernel jump to purgatory after shutdown */
-	image->start = entry;
+		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
+		dst = pi->purgatory_buf + offset;
+		memcpy(dst, src, sechdrs[i].sh_size);

-	/* Used later to get/set symbol values */
-	pi->sechdrs = sechdrs;
+		sechdrs[i].sh_addr = kbuf->mem + offset;
+		sechdrs[i].sh_offset = offset;
+		offset += sechdrs[i].sh_size;
+	}

-	/*
-	 * Used later to identify which section is purgatory and skip it
-	 * from checksumming.
-	 */
-	pi->purgatory_buf = kbuf.buffer;
-	return ret;
-out:
-	vfree(sechdrs);
-	vfree(kbuf.buffer);
-	return ret;
+	return 0;
 }

 static int kexec_apply_relocations(struct kimage *image)
 {
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
-	Elf_Shdr *sechdrs = pi->sechdrs;
+	const Elf_Shdr *sechdrs;
+
+	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

-	/* Apply relocations */
	for (i = 0; i < pi->ehdr->e_shnum; i++) {
-		Elf_Shdr *section, *symtab;
+		const Elf_Shdr *relsec;
+		const Elf_Shdr *symtab;
+		Elf_Shdr *section;
+
+		relsec = sechdrs + i;

-		if (sechdrs[i].sh_type != SHT_RELA &&
-		    sechdrs[i].sh_type != SHT_REL)
+		if (relsec->sh_type != SHT_RELA &&
+		    relsec->sh_type != SHT_REL)
			continue;

		/*
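
A worked example of the sizing pass in kexec_purgatory_setup_kbuf() (section sizes hypothetical): with .text of size 0x1a0/align 16, .data of size 0x40/align 8 and a .bss of size 0x20/align 8,

	bufsz = ALIGN(0x1a0, 8) + 0x40 = 0x1e0	/* progbits, copied into the buffer */
	memsz = 0x1e0 + 0x20 = 0x200		/* progbits + bss */

so kexec_add_buffer() reserves 0x200 bytes for the segment while only the first 0x1e0 are populated from the temporary buffer; the bss tail of the segment is left zero-filled at load time.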
@@ -834,12 +865,12 @@ static int kexec_apply_relocations(struct kimage *image)
		 * symbol table. And ->sh_info contains section header
		 * index of section to which relocations apply.
		 */
-		if (sechdrs[i].sh_info >= pi->ehdr->e_shnum ||
-		    sechdrs[i].sh_link >= pi->ehdr->e_shnum)
+		if (relsec->sh_info >= pi->ehdr->e_shnum ||
+		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

-		section = &sechdrs[sechdrs[i].sh_info];
-		symtab = &sechdrs[sechdrs[i].sh_link];
+		section = pi->sechdrs + relsec->sh_info;
+		symtab = sechdrs + relsec->sh_link;

		if (!(section->sh_flags & SHF_ALLOC))
			continue;
@@ -856,12 +887,12 @@ static int kexec_apply_relocations(struct kimage *image)
		 * Respective architecture needs to provide support for applying
		 * relocations of type SHT_RELA/SHT_REL.
		 */
-		if (sechdrs[i].sh_type == SHT_RELA)
-			ret = arch_kexec_apply_relocations_add(pi->ehdr,
-							       sechdrs, i);
-		else if (sechdrs[i].sh_type == SHT_REL)
-			ret = arch_kexec_apply_relocations(pi->ehdr,
-							   sechdrs, i);
+		if (relsec->sh_type == SHT_RELA)
+			ret = arch_kexec_apply_relocations_add(pi, section,
+							       relsec, symtab);
+		else if (relsec->sh_type == SHT_REL)
+			ret = arch_kexec_apply_relocations(pi, section,
+							   relsec, symtab);
		if (ret)
			return ret;
	}
@@ -869,10 +900,18 @@ static int kexec_apply_relocations(struct kimage *image)
	return 0;
 }

-/* Load relocatable purgatory object and relocate it appropriately */
-int kexec_load_purgatory(struct kimage *image, unsigned long min,
-			 unsigned long max, int top_down,
-			 unsigned long *load_addr)
+/*
+ * kexec_load_purgatory - Load and relocate the purgatory object.
+ * @image:	Image to add the purgatory to.
+ * @kbuf:	Memory parameters to use.
+ *
+ * Allocates the memory needed for image->purgatory_info.sechdrs and
+ * image->purgatory_info.purgatory_buf/kbuf->buffer. Caller is responsible
+ * to free the memory after use.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
 {
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;
@ -880,55 +919,51 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
if (kexec_purgatory_size <= 0) if (kexec_purgatory_size <= 0)
return -EINVAL; return -EINVAL;
	if (kexec_purgatory_size < sizeof(Elf_Ehdr))
		return -ENOEXEC;

	pi->ehdr = (Elf_Ehdr *)kexec_purgatory;
	if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
	    || pi->ehdr->e_type != ET_REL
	    || !elf_check_arch(pi->ehdr)
	    || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
		return -ENOEXEC;

	if (pi->ehdr->e_shoff >= kexec_purgatory_size
	    || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
		kexec_purgatory_size - pi->ehdr->e_shoff))
		return -ENOEXEC;

	ret = __kexec_load_purgatory(image, min, max, top_down);
	if (ret)
		return ret;

	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;
ret = kexec_apply_relocations(image); ret = kexec_apply_relocations(image);
if (ret) if (ret)
goto out; goto out;
*load_addr = pi->purgatory_load_addr;
return 0; return 0;
out: out:
vfree(pi->sechdrs); vfree(pi->sechdrs);
pi->sechdrs = NULL; pi->sechdrs = NULL;
out_free_kbuf:
vfree(pi->purgatory_buf); vfree(pi->purgatory_buf);
pi->purgatory_buf = NULL; pi->purgatory_buf = NULL;
return ret; return ret;
} }
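Under the new interface the caller no longer passes min/max/top_down separately; it fills a struct kexec_buf with its placement constraints and reads the result back out of the same structure. A hedged sketch of the intended call pattern, with illustrative bounds (the exact values an architecture uses are not part of this diff):

static int example_load_purgatory(struct kimage *image)
{
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = true };
	int ret;

	ret = kexec_load_purgatory(image, &kbuf);
	if (ret)
		return ret;

	/* kbuf.mem now holds the chosen load address; the relocated
	 * purgatory entry point has been written into image->start */
	return 0;
}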
static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,

/*
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi: Purgatory to search in.
 * @name: Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
const char *name) const char *name)
{ {
Elf_Sym *syms; const Elf_Shdr *sechdrs;
Elf_Shdr *sechdrs; const Elf_Ehdr *ehdr;
Elf_Ehdr *ehdr; const Elf_Sym *syms;
int i, k;
const char *strtab; const char *strtab;
int i, k;
if (!pi->sechdrs || !pi->ehdr) if (!pi->ehdr)
return NULL; return NULL;
sechdrs = pi->sechdrs;
ehdr = pi->ehdr; ehdr = pi->ehdr;
sechdrs = (void *)ehdr + ehdr->e_shoff;
for (i = 0; i < ehdr->e_shnum; i++) { for (i = 0; i < ehdr->e_shnum; i++) {
if (sechdrs[i].sh_type != SHT_SYMTAB) if (sechdrs[i].sh_type != SHT_SYMTAB)
@ -937,8 +972,8 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
if (sechdrs[i].sh_link >= ehdr->e_shnum) if (sechdrs[i].sh_link >= ehdr->e_shnum)
/* Invalid strtab section number */ /* Invalid strtab section number */
continue; continue;
strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset; strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
syms = (Elf_Sym *)sechdrs[i].sh_offset; syms = (void *)ehdr + sechdrs[i].sh_offset;
/* Go through symbols for a match */ /* Go through symbols for a match */
for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) { for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
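Because the search now runs over the read-only kexec_purgatory blob, every section's bytes still sit at their file offset from the ELF header, so both the symbol table and its string table can be read in place. A self-contained sketch of the same lookup (example_find_symbol is hypothetical):

#include <linux/elf.h>
#include <linux/string.h>

static const Elf64_Sym *example_find_symbol(const Elf64_Ehdr *ehdr,
					    const char *name)
{
	const Elf64_Shdr *sechdrs = (const void *)ehdr + ehdr->e_shoff;
	const Elf64_Sym *syms;
	const char *strtab;
	int i, k;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;
		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			continue;	/* invalid strtab section number */

		/* all offsets are relative to the unmodified blob itself */
		strtab = (const void *)ehdr +
			 sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (const void *)ehdr + sechdrs[i].sh_offset;

		for (k = 0; k < sechdrs[i].sh_size / sizeof(Elf64_Sym); k++)
			if (!strcmp(strtab + syms[k].st_name, name))
				return &syms[k];
	}
	return NULL;
}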
@ -966,7 +1001,7 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{ {
struct purgatory_info *pi = &image->purgatory_info; struct purgatory_info *pi = &image->purgatory_info;
Elf_Sym *sym; const Elf_Sym *sym;
Elf_Shdr *sechdr; Elf_Shdr *sechdr;
sym = kexec_purgatory_find_symbol(pi, name); sym = kexec_purgatory_find_symbol(pi, name);
@ -989,9 +1024,9 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
void *buf, unsigned int size, bool get_value) void *buf, unsigned int size, bool get_value)
{ {
Elf_Sym *sym;
Elf_Shdr *sechdrs;
struct purgatory_info *pi = &image->purgatory_info; struct purgatory_info *pi = &image->purgatory_info;
const Elf_Sym *sym;
Elf_Shdr *sec;
char *sym_buf; char *sym_buf;
sym = kexec_purgatory_find_symbol(pi, name); sym = kexec_purgatory_find_symbol(pi, name);
@ -1004,16 +1039,15 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
return -EINVAL; return -EINVAL;
} }
sechdrs = pi->sechdrs; sec = pi->sechdrs + sym->st_shndx;
if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { if (sec->sh_type == SHT_NOBITS) {
pr_err("symbol %s is in a bss section. Cannot %s\n", name, pr_err("symbol %s is in a bss section. Cannot %s\n", name,
get_value ? "get" : "set"); get_value ? "get" : "set");
return -EINVAL; return -EINVAL;
} }
sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset + sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;
sym->st_value;
if (get_value) if (get_value)
memcpy((void *)buf, sym_buf, size); memcpy((void *)buf, sym_buf, size);
@ -1022,3 +1056,174 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
return 0; return 0;
} }
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
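kexec_purgatory_get_set_symbol() is how an architecture hands values to the purgatory before it runs: the write lands in the staged copy at sh_offset + st_value, so the value is in place before the segment is checksummed and loaded. A hedged example; the symbol name is illustrative here, not something this diff defines:

static int example_set_purgatory_var(struct kimage *image, unsigned long dest)
{
	return kexec_purgatory_get_set_symbol(image, "backup_dest", &dest,
					      sizeof(dest), false);
}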
int crash_exclude_mem_range(struct crash_mem *mem,
unsigned long long mstart, unsigned long long mend)
{
int i, j;
unsigned long long start, end;
struct crash_mem_range temp_range = {0, 0};
for (i = 0; i < mem->nr_ranges; i++) {
start = mem->ranges[i].start;
end = mem->ranges[i].end;
if (mstart > end || mend < start)
continue;
/* Truncate any area outside of range */
if (mstart < start)
mstart = start;
if (mend > end)
mend = end;
/* Found completely overlapping range */
if (mstart == start && mend == end) {
mem->ranges[i].start = 0;
mem->ranges[i].end = 0;
if (i < mem->nr_ranges - 1) {
/* Shift rest of the ranges to left */
for (j = i; j < mem->nr_ranges - 1; j++) {
mem->ranges[j].start =
mem->ranges[j+1].start;
mem->ranges[j].end =
mem->ranges[j+1].end;
}
}
mem->nr_ranges--;
return 0;
}
if (mstart > start && mend < end) {
/* Split original range */
mem->ranges[i].end = mstart - 1;
temp_range.start = mend + 1;
temp_range.end = end;
} else if (mstart != start)
mem->ranges[i].end = mstart - 1;
else
mem->ranges[i].start = mend + 1;
break;
}
/* If a split happened, add the split to array */
if (!temp_range.end)
return 0;
/* Split happened */
if (i == mem->max_nr_ranges - 1)
return -ENOMEM;
/* Location where new range should go */
j = i + 1;
if (j < mem->nr_ranges) {
/* Move over all ranges one slot towards the end */
for (i = mem->nr_ranges - 1; i >= j; i--)
mem->ranges[i + 1] = mem->ranges[i];
}
mem->ranges[j].start = temp_range.start;
mem->ranges[j].end = temp_range.end;
mem->nr_ranges++;
return 0;
}
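A worked example of the splitting logic above: excluding a window from the middle of a range truncates the original in place and inserts the remainder as a new range, provided max_nr_ranges leaves a free slot. Values below are illustrative:

static int example_carve_out(struct crash_mem *mem)
{
	/* suppose mem holds the single range [0x1000, 0x4fff] */
	int ret = crash_exclude_mem_range(mem, 0x2000, 0x2fff);

	/* on success mem now holds [0x1000, 0x1fff] and [0x3000, 0x4fff];
	 * -ENOMEM means max_nr_ranges had no slot for the split remainder */
	return ret;
}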
int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
void **addr, unsigned long *sz)
{
Elf64_Ehdr *ehdr;
Elf64_Phdr *phdr;
unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
unsigned char *buf;
unsigned int cpu, i;
unsigned long long notes_addr;
unsigned long mstart, mend;
/* extra phdr for vmcoreinfo elf note */
nr_phdr = nr_cpus + 1;
nr_phdr += mem->nr_ranges;
/*
* kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
* area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
* I think this is required by tools like gdb. So the same physical
* memory will be mapped in two elf headers. One will contain kernel
* text virtual addresses and the other will have __va(physical) addresses.
*/
nr_phdr++;
elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
buf = vzalloc(elf_sz);
if (!buf)
return -ENOMEM;
ehdr = (Elf64_Ehdr *)buf;
phdr = (Elf64_Phdr *)(ehdr + 1);
memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
ehdr->e_ident[EI_CLASS] = ELFCLASS64;
ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
ehdr->e_ident[EI_VERSION] = EV_CURRENT;
ehdr->e_ident[EI_OSABI] = ELF_OSABI;
memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
ehdr->e_type = ET_CORE;
ehdr->e_machine = ELF_ARCH;
ehdr->e_version = EV_CURRENT;
ehdr->e_phoff = sizeof(Elf64_Ehdr);
ehdr->e_ehsize = sizeof(Elf64_Ehdr);
ehdr->e_phentsize = sizeof(Elf64_Phdr);
/* Prepare one phdr of type PT_NOTE for each present cpu */
for_each_present_cpu(cpu) {
phdr->p_type = PT_NOTE;
notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
phdr->p_offset = phdr->p_paddr = notes_addr;
phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
(ehdr->e_phnum)++;
phdr++;
}
/* Prepare one PT_NOTE header for vmcoreinfo */
phdr->p_type = PT_NOTE;
phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
(ehdr->e_phnum)++;
phdr++;
/* Prepare PT_LOAD type program header for kernel text region */
if (kernel_map) {
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_vaddr = (Elf64_Addr)_text;
phdr->p_filesz = phdr->p_memsz = _end - _text;
phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
ehdr->e_phnum++;
phdr++;
}
/* Go through all the ranges in mem->ranges[] and prepare phdr */
for (i = 0; i < mem->nr_ranges; i++) {
mstart = mem->ranges[i].start;
mend = mem->ranges[i].end;
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_offset = mstart;
phdr->p_paddr = mstart;
phdr->p_vaddr = (unsigned long long) __va(mstart);
phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
phdr->p_align = 0;
ehdr->e_phnum++;
phdr++;
pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
ehdr->e_phnum, phdr->p_offset);
}
*addr = buf;
*sz = elf_sz;
return 0;
}
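A sketch of the call pattern this helper is designed for: the architecture collects its RAM ranges into a crash_mem, excludes the crash kernel's own region, then asks for the ELF core headers (kernel_map=1 requests the extra kernel-text PT_LOAD phdr). The example_ wrapper is hypothetical:

static int example_build_elf_headers(struct crash_mem *cmem,
				     void **addr, unsigned long *sz)
{
	int ret;

	/* never describe the memory the crash kernel itself occupies */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	return crash_prepare_elf64_headers(cmem, 1, addr, sz);
}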

View file

@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old,
alloc.start = constraint->alignf(constraint->alignf_data, &avail, alloc.start = constraint->alignf(constraint->alignf_data, &avail,
size, constraint->align); size, constraint->align);
alloc.end = alloc.start + size - 1; alloc.end = alloc.start + size - 1;
if (resource_contains(&avail, &alloc)) { if (alloc.start <= alloc.end &&
resource_contains(&avail, &alloc)) {
new->start = alloc.start; new->start = alloc.start;
new->end = alloc.end; new->end = alloc.end;
return 0; return 0;
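The added alloc.start <= alloc.end test guards against unsigned wraparound: if the alignment callback returns a start so high that start + size - 1 wraps past the top of the address space, alloc.end lands below alloc.start and resource_contains() alone could accept the bogus candidate. A minimal illustration of the check:

static bool example_candidate_ok(resource_size_t start, resource_size_t size)
{
	resource_size_t end = start + size - 1;	/* may wrap past the top */

	return start <= end;	/* a wrapped candidate must be rejected */
}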

View file

@ -16,9 +16,9 @@
*/ */
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/sha256.h>
#include <linux/string.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include "sha256.h"
#include "../boot/string.h"
static inline u32 Ch(u32 x, u32 y, u32 z) static inline u32 Ch(u32 x, u32 y, u32 z)
{ {

View file

@ -2719,7 +2719,6 @@ out:
sb_end_pagefault(inode->i_sb); sb_end_pagefault(inode->i_sb);
return ret; return ret;
} }
EXPORT_SYMBOL(filemap_page_mkwrite);
const struct vm_operations_struct generic_file_vm_ops = { const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault, .fault = filemap_fault,
@ -2750,6 +2749,10 @@ int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
return generic_file_mmap(file, vma); return generic_file_mmap(file, vma);
} }
#else #else
int filemap_page_mkwrite(struct vm_fault *vmf)
{
return -ENOSYS;
}
int generic_file_mmap(struct file * file, struct vm_area_struct * vma) int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{ {
return -ENOSYS; return -ENOSYS;
@ -2760,6 +2763,7 @@ int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
} }
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap); EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap); EXPORT_SYMBOL(generic_file_readonly_mmap);

View file

@ -1740,7 +1740,9 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
/* /*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP. It will only return non-negative values. * the regular GUP.
* Note a difference with get_user_pages_fast: this always returns the
* number of pages pinned, 0 if no pages were pinned.
*/ */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write, int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages) struct page **pages)
@ -1806,9 +1808,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
len = (unsigned long) nr_pages << PAGE_SHIFT; len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len; end = start + len;
if (nr_pages <= 0)
return 0;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len))) (void __user *)start, len)))
return 0; return -EFAULT;
if (gup_fast_permitted(start, nr_pages, write)) { if (gup_fast_permitted(start, nr_pages, write)) {
local_irq_disable(); local_irq_disable();
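With this change get_user_pages_fast() reports a bad address range as -EFAULT instead of a silent 0, so callers (like the gup_benchmark loop below) must check for negative returns before advancing. A sketch of the resulting calling convention:

static int example_pin_user_range(unsigned long addr, int nr,
				  struct page **pages)
{
	int pinned = get_user_pages_fast(addr, nr, 1, pages);

	if (pinned < 0)
		return pinned;		/* e.g. -EFAULT for a bad range */

	/* a short pin is not an error: release or retry the remainder */
	return pinned;
}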

View file

@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
struct page **pages; struct page **pages;
nr_pages = gup->size / PAGE_SIZE; nr_pages = gup->size / PAGE_SIZE;
pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL); pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
if (!pages) if (!pages)
return -ENOMEM; return -ENOMEM;
@ -41,6 +41,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
} }
nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i); nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
if (nr <= 0)
break;
i += nr; i += nr;
} }
end_time = ktime_get(); end_time = ktime_get();

View file

@ -4086,7 +4086,8 @@ next:
next_reap_node(); next_reap_node();
out: out:
/* Set up the next iteration */ /* Set up the next iteration */
schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); schedule_delayed_work_on(smp_processor_id(), work,
round_jiffies_relative(REAPTIMEOUT_AC));
} }
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
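cache_reap() does per-CPU cache maintenance, so re-arming it with plain schedule_delayed_work() could let an unbound workqueue run the next iteration on a different CPU; naming the current CPU keeps the work pinned where its data lives. The general pattern, sketched with an illustrative handler:

#include <linux/workqueue.h>

static void example_percpu_reap(struct work_struct *w)
{
	struct delayed_work *work = to_delayed_work(w);

	/* ... maintenance for this CPU's caches ... */

	/* safe here only because the work is already CPU-bound */
	schedule_delayed_work_on(smp_processor_id(), work,
				 round_jiffies_relative(4 * HZ));
}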

View file

@ -297,8 +297,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
/* /*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP. * back to the regular GUP.
* If the architecture not support this function, simply return with no * Note a difference with get_user_pages_fast: this always returns the
* page pinned * number of pages pinned, 0 if no pages were pinned.
* If the architecture does not support this function, simply return with no
* pages pinned.
*/ */
int __weak __get_user_pages_fast(unsigned long start, int __weak __get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages) int nr_pages, int write, struct page **pages)