diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 90eef404ec2e..615d9743a3cb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1771,12 +1771,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
 #ifndef CONFIG_NUMA
+	up_read(&mm->mmap_sem);
 	VM_BUG_ON(!*hpage);
 	new_page = *hpage;
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		return;
-	}
 #else
 	VM_BUG_ON(*hpage);
 	/*
@@ -1791,22 +1788,26 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
+
+	/*
+	 * After allocating the hugepage, release the mmap_sem read lock in
+	 * preparation for taking it in write mode.
+	 */
+	up_read(&mm->mmap_sem);
 	if (unlikely(!new_page)) {
-		up_read(&mm->mmap_sem);
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
 		return;
 	}
-	count_vm_event(THP_COLLAPSE_ALLOC);
-	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
-		up_read(&mm->mmap_sem);
-		put_page(new_page);
-		return;
-	}
 #endif
 
-	/* after allocating the hugepage upgrade to mmap_sem write mode */
-	up_read(&mm->mmap_sem);
+	count_vm_event(THP_COLLAPSE_ALLOC);
+	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+#ifdef CONFIG_NUMA
+		put_page(new_page);
+#endif
+		return;
+	}
 
 	/*
 	 * Prevent all access to pagetables with the exception of
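
For readers tracking the locking change rather than the diff mechanics, the sketch below (a userspace analogue, not kernel code) shows the resulting control flow: the read lock is released immediately after the allocation step and before the memcg charge, presumably so a charge that enters reclaim does not pin mmap_sem, and both configurations now funnel into one charge-failure path. alloc_page_stub() and charge_page() are hypothetical stand-ins for alloc_hugepage_vma() and mem_cgroup_newpage_charge(), and a pthreads rwlock stands in for mmap_sem.

/*
 * Minimal userspace sketch of the reworked ordering in
 * collapse_huge_page(): drop the read lock as soon as the
 * allocation step is done, charge *after* unlocking, and let both
 * configuration shapes share a single charge-failure path.  All
 * helpers here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for alloc_hugepage_vma(): may fail. */
static void *alloc_page_stub(void)
{
	return malloc(4096);
}

/* Stand-in for mem_cgroup_newpage_charge(): may block on reclaim. */
static bool charge_page(void *page)
{
	(void)page;
	return true;
}

static int collapse_sketch(void)
{
	void *new_page;

	pthread_rwlock_rdlock(&mmap_sem);
	/* ... VMA validation that needs the read lock ... */
	new_page = alloc_page_stub();

	/*
	 * Release the read lock before charging, as in the patch:
	 * the charge may block, so mmap_sem is not held across it.
	 */
	pthread_rwlock_unlock(&mmap_sem);

	if (!new_page)
		return -1;	/* THP_COLLAPSE_ALLOC_FAILED path */

	if (!charge_page(new_page)) {
		/*
		 * Mirrors the CONFIG_NUMA branch, which drops its
		 * freshly allocated page; the !CONFIG_NUMA config
		 * keeps the preallocated *hpage instead.
		 */
		free(new_page);
		return -1;
	}

	/* ... re-take the lock in write mode and do the collapse ... */
	pthread_rwlock_wrlock(&mmap_sem);
	pthread_rwlock_unlock(&mmap_sem);
	free(new_page);
	return 0;
}

int main(void)
{
	return collapse_sketch() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The observable change is purely one of ordering and deduplication: the charge now happens with mmap_sem already released, and the two per-config error unwinds collapse into a single block guarded by one small #ifdef for the put_page().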