powerpc/book3s64: Fix error handling in mm_iommu_do_alloc()
[ Upstream commit c4b78169e3 ] (branch: 5.4-rM2-2.2.x-imx-squashed)
The last jump to free_exit in mm_iommu_do_alloc() happens after page pointers in struct mm_iommu_table_group_mem_t were already converted to physical addresses. Thus calling put_page() on these physical addresses will likely crash. This moves the loop which calculates the pageshift and converts page struct pointers to physical addresses later after the point when we cannot fail; thus eliminating the need to convert pointers back. Fixes: eb9d7a62c3
("powerpc/mm_iommu: Fix potential deadlock") Reported-by: Jan Kara <jack@suse.cz> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://lore.kernel.org/r/20191223060351.26359-1-aik@ozlabs.ru Signed-off-by: Sasha Levin <sashal@kernel.org>
parent
efc95f2ef7
commit
f211830829
|
@ -121,24 +121,6 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
|
|||
goto free_exit;
|
||||
}
|
||||
|
||||
pageshift = PAGE_SHIFT;
|
||||
for (i = 0; i < entries; ++i) {
|
||||
struct page *page = mem->hpages[i];
|
||||
|
||||
/*
|
||||
* Allow to use larger than 64k IOMMU pages. Only do that
|
||||
* if we are backed by hugetlb.
|
||||
*/
|
||||
if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
|
||||
pageshift = page_shift(compound_head(page));
|
||||
mem->pageshift = min(mem->pageshift, pageshift);
|
||||
/*
|
||||
* We don't need struct page reference any more, switch
|
||||
* to physical address.
|
||||
*/
|
||||
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
|
||||
}
|
||||
|
||||
good_exit:
|
||||
atomic64_set(&mem->mapped, 1);
|
||||
mem->used = 1;
|
||||
|
@ -158,6 +140,27 @@ good_exit:
|
|||
}
|
||||
}
|
||||
|
||||
if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
|
||||
/*
|
||||
* Allow to use larger than 64k IOMMU pages. Only do that
|
||||
* if we are backed by hugetlb. Skip device memory as it is not
|
||||
* backed with page structs.
|
||||
*/
|
||||
pageshift = PAGE_SHIFT;
|
||||
for (i = 0; i < entries; ++i) {
|
||||
struct page *page = mem->hpages[i];
|
||||
|
||||
if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
|
||||
pageshift = page_shift(compound_head(page));
|
||||
mem->pageshift = min(mem->pageshift, pageshift);
|
||||
/*
|
||||
* We don't need struct page reference any more, switch
|
||||
* to physical address.
|
||||
*/
|
||||
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
|
||||
}
|
||||
}
|
||||
|
||||
list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
|
||||
|
||||
mutex_unlock(&mem_list_mutex);
|
||||
|
|
Loading…
Reference in New Issue