intel-iommu: Introduce first_pte_in_page() to simplify PTE-setting loops

On Wed, 2009-07-01 at 16:59 -0700, Linus Torvalds wrote:
> I also _really_ hate how you do
>
>         (unsigned long)pte >> VTD_PAGE_SHIFT ==
>         (unsigned long)first_pte >> VTD_PAGE_SHIFT

Kill this, in favour of just looking to see if the incremented pte
pointer has 'wrapped' onto the next page. Which means we have to check
it _after_ incrementing it, not before.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
commit 75e6bf9638
parent 7766a3fb90
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   2009-07-02 11:21:16 +01:00
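
To see what the new helper is testing, here is a minimal standalone sketch (not part of the patch): it redefines VTD_PAGE_SHIFT/VTD_PAGE_MASK locally with the driver's 4KiB page value, fakes one page table page with aligned_alloc(), and walks it the way the converted loops below do. The helper returns true exactly when the pointer's offset within its page is zero, i.e. when an increment has just carried it onto a fresh page.

	#include <stdio.h>
	#include <stdlib.h>

	#define VTD_PAGE_SHIFT	12	/* the driver's 4KiB VT-d page size */
	#define VTD_PAGE_MASK	(((unsigned long)-1) << VTD_PAGE_SHIFT)

	struct dma_pte { unsigned long long val; };

	/* True iff pte's offset within its page is zero, i.e. an increment
	 * has just carried the pointer onto a fresh page table page. */
	static inline int first_pte_in_page(struct dma_pte *pte)
	{
		return !((unsigned long)pte & ~VTD_PAGE_MASK);
	}

	int main(void)
	{
		/* Stand-in for one page table page; real ones are also
		 * page-aligned. */
		struct dma_pte *first_pte = aligned_alloc(1 << VTD_PAGE_SHIFT,
							  1 << VTD_PAGE_SHIFT);
		struct dma_pte *pte = first_pte;
		int cleared = 0;

		if (!first_pte)
			return 1;

		/* The test must come _after_ the increment: first_pte itself
		 * is page-aligned, so testing before it would never enter
		 * the loop.  Hence the do/while conversions in the patch. */
		do {
			pte->val = 0;	/* stand-in for dma_clear_pte() */
			pte++;
			cleared++;
		} while (!first_pte_in_page(pte));

		printf("cleared %d PTEs\n", cleared);	/* 4096 / 8 = 512 */
		free(first_pte);
		return 0;
	}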

@@ -240,6 +240,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 	return (pte->val & 3) != 0;
 }
 
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+	return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
 /*
  * This domain is a statically identity mapping domain.
  *	1. This domain creats a static 1:1 mapping to all usable memory.
@@ -780,13 +785,12 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 			start_pfn = align_to_level(start_pfn + 1, 2);
 			continue;
 		}
-		while (start_pfn <= last_pfn &&
-		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
-		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+		do {
 			dma_clear_pte(pte);
 			start_pfn++;
 			pte++;
-		}
+		} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
+
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
 	}
@@ -821,14 +825,14 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 			tmp = align_to_level(tmp + 1, level + 1);
 			continue;
 		}
-		while (tmp + level_size(level) - 1 <= last_pfn &&
-		       (unsigned long)pte >> VTD_PAGE_SHIFT ==
-		       (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+		do {
 			free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
 			dma_clear_pte(pte);
 			pte++;
 			tmp += level_size(level);
-		}
+		} while (!first_pte_in_page(pte) &&
+			 tmp + level_size(level) - 1 <= last_pfn);
+
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
@@ -1694,9 +1698,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			WARN_ON(1);
 		}
 		pte++;
-		if (!nr_pages ||
-		    (unsigned long)pte >> VTD_PAGE_SHIFT !=
-		    (unsigned long)first_pte >> VTD_PAGE_SHIFT) {
+		if (!nr_pages || first_pte_in_page(pte)) {
 			domain_flush_cache(domain, first_pte,
 					   (void *)pte - (void *)first_pte);
 			pte = NULL;
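
A quick way to convince yourself the conversion preserves the loop bounds: for a walk starting at any slot within a page, the old page-number comparison and the new post-increment alignment test stop at exactly the same address. A throwaway check (not from the commit; it assumes 4KiB pages and 8-byte PTEs, works on raw addresses to stay self-contained, and elides the pfn limit that both loop forms also carry):

	#include <assert.h>
	#include <stdio.h>

	#define VTD_PAGE_SHIFT	12
	#define VTD_PAGE_MASK	(((unsigned long)-1) << VTD_PAGE_SHIFT)

	/* The new helper's test, applied to a raw address. */
	static int first_pte_in_page_addr(unsigned long addr)
	{
		return !(addr & ~VTD_PAGE_MASK);
	}

	int main(void)
	{
		unsigned long base, old_end, new_end;

		/* Try every 8-byte PTE slot in one 4KiB page. */
		for (base = 0x5000; base < 0x6000; base += 8) {
			/* Old form: compare page numbers before each step. */
			old_end = base;
			while ((old_end >> VTD_PAGE_SHIFT) ==
			       (base >> VTD_PAGE_SHIFT))
				old_end += 8;

			/* New form: step first, stop once the address has
			 * wrapped onto a page boundary. */
			new_end = base;
			do {
				new_end += 8;
			} while (!first_pte_in_page_addr(new_end));

			assert(old_end == new_end);	/* both stop at 0x6000 */
		}
		printf("old and new loop bounds agree\n");
		return 0;
	}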