
powerpc/mm/thp: Return pte address if we find trans_splitting.

For a THP that is marked trans splitting, we now return the pte.
This requires the callers to handle the pmd_trans_splitting scenario,
if they care. All the current callers only look at the pfn or at
write_ok, hence we don't need to update them.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Aneesh Kumar K.V 2015-03-30 10:41:04 +05:30 committed by Michael Ellerman
parent 691e95fd73
commit 7d6e7f7ffa
4 changed files with 10 additions and 22 deletions
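For context, here is a hedged sketch of what "callers ... handle the pmd_trans_splitting scenario, if they care" means in practice. It is not code from this patch: my_lookup_pfn() is a made-up illustrative caller, assembled only from helpers the hunks below already use (find_linux_pte_or_hugepte, kvmppc_read_update_linux_pte, pmd_trans_splitting, pte_pfn).

/*
 * Illustrative only, not part of this commit.  After this change
 * kvmppc_read_update_linux_pte() can hand back a pte whose pmd is still
 * marked trans splitting; a caller that cares filters that out itself.
 */
static unsigned long my_lookup_pfn(pgd_t *pgdir, unsigned long hva, int writing)
{
	unsigned long pfn = 0, flags;
	pte_t *ptep, pte;

	local_irq_save(flags);	/* keep the page tables stable while we walk */
	ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
	if (ptep) {
		pte = kvmppc_read_update_linux_pte(ptep, writing);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		/* this caller chooses to treat a splitting THP as not present */
		if (pmd_trans_splitting(pte_pmd(pte)))
			pte = __pte(0);
#endif
		if (pte_val(pte) & _PAGE_PRESENT)
			pfn = pte_pfn(pte);
	}
	local_irq_restore(flags);
	return pfn;
}

The callers actually touched below (the page-fault write_ok check and kvmppc_do_h_enter) skip the #ifdef part: they only consult pte_write() or the pfn, for which a still-splitting hugepage pte is good enough.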


@@ -281,11 +281,9 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
 /*
  * If it's present and writable, atomically set dirty and referenced bits and
- * return the PTE, otherwise return 0. If we find a transparent hugepage
- * and if it is marked splitting we return 0;
+ * return the PTE, otherwise return 0.
  */
-static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
-						 unsigned int hugepage)
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
 {
 	pte_t old_pte, new_pte = __pte(0);
@@ -301,12 +299,6 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
 			cpu_relax();
 			continue;
 		}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		/* If hugepage and is trans splitting return None */
-		if (unlikely(hugepage &&
-			     pmd_trans_splitting(pte_pmd(old_pte))))
-			return __pte(0);
-#endif
 		/* If pte is not present return None */
 		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
 			return __pte(0);
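Only the middle of kvmppc_read_update_linux_pte() is visible in the hunk above. For orientation, the loop it sits in has roughly the shape sketched below once the trans-splitting check is gone. This is a paraphrase under assumptions, not the file's verbatim body: my_read_update_pte() and my_pte_cmpxchg() are made-up names, the latter standing in for whatever atomic pte-update primitive the real function uses.

/* Paraphrased shape of the update loop after this patch (names are hypothetical). */
static inline pte_t my_read_update_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		old_pte = *ptep;
		/* wait until _PAGE_BUSY is clear before claiming the pte */
		if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* if the pte is not present, return none */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		/* mark referenced, and dirty if this is a write to a writable pte */
		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		/* my_pte_cmpxchg() stands in for the real atomic update */
		if (my_pte_cmpxchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}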


@@ -537,20 +537,17 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 		/* if the guest wants write access, see if that is OK */
 		if (!writing && hpte_is_writable(r)) {
-			unsigned int hugepage_shift;
 			pte_t *ptep, pte;
 			unsigned long flags;
-
 			/*
 			 * We need to protect against page table destruction
-			 * while looking up and updating the pte.
+			 * hugepage split and collapse.
 			 */
 			local_irq_save(flags);
 			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
-							 hva, &hugepage_shift);
+							 hva, NULL);
 			if (ptep) {
-				pte = kvmppc_read_update_linux_pte(ptep, 1,
-								   hugepage_shift);
+				pte = kvmppc_read_update_linux_pte(ptep, 1);
 				if (pte_write(pte))
 					write_ok = 1;
 			}


@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			local_irq_restore(flags);
 			return H_PARAMETER;
 		}
-		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
+		pte = kvmppc_read_update_linux_pte(ptep, writing);
 		if (pte_present(pte) && !pte_protnone(pte)) {
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */


@@ -1014,12 +1014,11 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 			 * A hugepage collapse is captured by pmd_none, because
 			 * it mark the pmd none and do a hpte invalidate.
 			 *
-			 * A hugepage split is captured by pmd_trans_splitting
-			 * because we mark the pmd trans splitting and do a
-			 * hpte invalidate
-			 *
+			 * We don't worry about pmd_trans_splitting here, The
+			 * caller if it needs to handle the splitting case
+			 * should check for that.
 			 */
-			if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+			if (pmd_none(pmd))
 				return NULL;
 			if (pmd_huge(pmd) || pmd_large(pmd)) {
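To see why a caller-side check is even possible, note what the walk hands back for a THP: the pmd entry itself, cast to pte_t *. The sketch below is an illustrative paraphrase of that pmd-level branch, not a verbatim copy of the file; my_pmd_level_lookup() is a made-up name, and the real function additionally handles hugepd entries, which the sketch skips.

/* Illustrative paraphrase of the pmd-level step, assuming pmdp already points at the pmd entry for ea. */
static pte_t *my_pmd_level_lookup(pmd_t *pmdp, unsigned long ea)
{
	pmd_t pmd = *pmdp;

	if (pmd_none(pmd))			/* hugepage collapse clears the pmd, so this catches it */
		return NULL;
	if (pmd_huge(pmd) || pmd_large(pmd))	/* THP / huge pmd: return the pmd entry itself ...   */
		return (pte_t *)pmdp;		/* ... so the caller can test pmd_trans_splitting()   */
	return pte_offset_kernel(&pmd, ea);	/* regular mapping: descend to the pte page           */
}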