intel-iommu: Make dma_pte_clear_range() use pfns

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
David Woodhouse 2009-06-27 19:15:01 +01:00
parent 66eae8469e
commit 04b18e65dd


@@ -779,21 +779,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, unsigned long pfn)
 /* clear last level pte, a tlb flush should be followed */
 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 {
-	int addr_width = agaw_to_width(domain->agaw);
-	int npages;
+	unsigned long start_pfn = IOVA_PFN(start);
+	unsigned long end_pfn = IOVA_PFN(end-1);
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
-	BUG_ON(start >> addr_width);
-	BUG_ON((end-1) >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && end_pfn >> addr_width);
 
-	/* in case it's partial page */
-	start &= PAGE_MASK;
-	end = PAGE_ALIGN(end);
-	npages = (end - start) / VTD_PAGE_SIZE;
-
-	/* we don't need lock here, nobody else touches the iova range */
-	while (npages--) {
-		dma_pte_clear_one(domain, start >> VTD_PAGE_SHIFT);
-		start += VTD_PAGE_SIZE;
+	/* we don't need lock here; nobody else touches the iova range */
+	while (start_pfn <= end_pfn) {
+		dma_pte_clear_one(domain, start_pfn);
+		start_pfn++;
 	}
 }
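
For context, here is a minimal stand-alone C sketch of the pfn-based loop the new code uses: it converts a byte range to an inclusive pfn range and walks it one page at a time. IOVA_PFN(), VTD_PAGE_SHIFT and clear_one() below are simplified stand-ins for the kernel definitions and for dma_pte_clear_one(); this is an illustration under those assumptions, not kernel code.

/*
 * Stand-alone sketch of the pfn-based clearing loop above (not kernel code).
 * IOVA_PFN() and VTD_PAGE_SHIFT are simplified stand-ins for the kernel
 * definitions; clear_one() stands in for dma_pte_clear_one().
 */
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12                       /* 4KiB VT-d pages */
#define IOVA_PFN(addr) ((unsigned long)((addr) >> VTD_PAGE_SHIFT))

static void clear_one(unsigned long pfn)
{
	printf("clear pte for pfn 0x%lx\n", pfn);
}

/* Convert the byte range [start, end) to an inclusive pfn range and clear
 * one page at a time, mirroring the new dma_pte_clear_range() loop. */
static void clear_range(uint64_t start, uint64_t end)
{
	unsigned long start_pfn = IOVA_PFN(start);
	unsigned long end_pfn = IOVA_PFN(end - 1);

	while (start_pfn <= end_pfn) {
		clear_one(start_pfn);
		start_pfn++;
	}
}

int main(void)
{
	/* A 12KiB range that starts mid-page spans pfns 0x10..0x13. */
	clear_range(0x10800, 0x13800);
	return 0;
}

Because end_pfn is computed from end-1, a range ending exactly on a page boundary does not clear an extra page, while a range that starts or ends mid-page still covers every page it touches.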