vdso: make arch_setup_additional_pages wait for mmap_sem for write killable

Most architectures rely on taking mmap_sem for write in their
arch_setup_additional_pages().  If the waiting task gets killed by the
OOM killer, it blocks the oom_reaper from asynchronously reclaiming the
address space and reduces the chances of a timely OOM resolution.  Wait
for the lock in killable mode and return -EINTR if the task is killed
while waiting.
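
For reference, below is a minimal sketch of the locking pattern every
hunk below converges on.  The helper name and its body are illustrative
placeholders; only down_write_killable(), up_write(), mmap_sem and the
-EINTR return value come from the patch itself:

	#include <linux/mm_types.h>
	#include <linux/rwsem.h>

	/* Hypothetical helper, for illustration only. */
	static int example_setup_pages(struct mm_struct *mm)
	{
		/* Sleep killably instead of blocking unconditionally. */
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;	/* killed while waiting: bail out */

		/* ... install the vdso/sigpage mappings under mmap_sem ... */

		up_write(&mm->mmap_sem);
		return 0;
	}

The practical effect is that a task killed (e.g. by the OOM killer)
while queued on mmap_sem returns -EINTR promptly rather than sitting in
the rwsem wait queue, so the oom_reaper is not prevented from reclaiming
its address space.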

Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Andy Lutomirski <luto@amacapital.net>	[x86 vdso]
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Michal Hocko 2016-05-23 16:25:54 -07:00 committed by Linus Torvalds
parent 91f4f94ea3
commit 6904817607
9 changed files with 21 additions and 10 deletions

@@ -420,7 +420,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	npages = 1; /* for sigpage */
 	npages += vdso_total_pages;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	hint = sigpage_addr(mm, npages);
 	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(addr)) {

@@ -95,7 +95,8 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 	};
 	void *ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	current->mm->context.vdso = (void *)addr;
 
 	/* Map vectors page at the high address. */
@@ -163,7 +164,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
 	/* Be sure to map the data page */
 	vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		ret = ERR_PTR(vdso_base);

@@ -65,7 +65,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long vdso_base;
 	struct mm_struct *mm = current->mm;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/* Try to get it loaded right near ld.so/glibc. */
 	vdso_base = STACK_TOP;

@@ -104,7 +104,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	struct resource gic_res;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	/*
 	 * Determine total area size. This includes the VDSO data itself, the

@@ -195,7 +195,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * and end up putting it elsewhere.
 	 * Add enough to the size so that the result can be aligned.
 	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, vdso_base,
 				      (vdso_pages << PAGE_SHIFT) +
 				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),

@@ -216,7 +216,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	 * it at vdso_base which is the "natural" base for it, but we might
 	 * fail and end up putting it elsewhere.
 	 */
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
 		rc = vdso_base;

@@ -64,7 +64,9 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long addr;
 	int ret;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
 	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
 	if (IS_ERR_VALUE(addr)) {
 		ret = addr;

@@ -163,7 +163,8 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 		addr = 0;
 	}
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	addr = get_unmapped_area(NULL, addr,
 				 image->size - image->sym_vvar_start, 0, 0);

@@ -61,7 +61,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!vdso_enabled)
 		return 0;
 
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
 
 	err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
 		VM_READ|VM_EXEC|