intel-iommu: trivially inline DMA PTE macros

Signed-off-by: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
This commit is contained in:
Mark McLoughlin 2008-11-21 16:56:53 +00:00 committed by Joerg Roedel
parent c07e7d217b
commit 19c239ce3d

View file

@ -164,16 +164,41 @@ static inline void context_clear_entry(struct context_entry *context)
/*
 * One hardware DMA page-table entry: a single 64-bit word carrying the
 * page-frame address plus the low permission bits.
 */
struct dma_pte {
	u64 val;
};
#define dma_clear_pte(p) do {(p).val = 0;} while (0)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) static inline void dma_clear_pte(struct dma_pte *pte)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) {
#define dma_set_pte_prot(p, prot) \ pte->val = 0;
do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) }
#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
#define dma_set_pte_addr(p, addr) do {\ static inline void dma_set_pte_readable(struct dma_pte *pte)
(p).val |= ((addr) & VTD_PAGE_MASK); } while (0) {
#define dma_pte_present(p) (((p).val & 3) != 0) pte->val |= DMA_PTE_READ;
}
/* Grant DMA write permission through this PTE. */
static inline void dma_set_pte_writable(struct dma_pte *pte)
{
	pte->val = pte->val | DMA_PTE_WRITE;
}
static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
pte->val = (pte->val & ~3) | (prot & 3);
}
/* Extract the page-aligned physical address stored in the PTE. */
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
	return pte->val & VTD_PAGE_MASK;
}
/*
 * Install a page-aligned physical address into the PTE.  The address
 * bits are OR-ed in, so the caller is expected to start from a cleared
 * entry; permission bits are not modified.
 */
static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
{
	u64 frame = addr & VTD_PAGE_MASK;

	pte->val |= frame;
}
/* True when either of the low two permission bits is set. */
static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) ? true : false;
}
struct dmar_domain { struct dmar_domain {
int id; /* domain id */ int id; /* domain id */
@ -487,7 +512,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
if (level == 1) if (level == 1)
break; break;
if (!dma_pte_present(*pte)) { if (!dma_pte_present(pte)) {
tmp_page = alloc_pgtable_page(); tmp_page = alloc_pgtable_page();
if (!tmp_page) { if (!tmp_page) {
@ -497,16 +522,16 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
} }
__iommu_flush_cache(domain->iommu, tmp_page, __iommu_flush_cache(domain->iommu, tmp_page,
PAGE_SIZE); PAGE_SIZE);
dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); dma_set_pte_addr(pte, virt_to_phys(tmp_page));
/* /*
* high level table always sets r/w, last level page * high level table always sets r/w, last level page
* table control read/write * table control read/write
*/ */
dma_set_pte_readable(*pte); dma_set_pte_readable(pte);
dma_set_pte_writable(*pte); dma_set_pte_writable(pte);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
} }
parent = phys_to_virt(dma_pte_addr(*pte)); parent = phys_to_virt(dma_pte_addr(pte));
level--; level--;
} }
@ -529,9 +554,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
if (level == total) if (level == total)
return pte; return pte;
if (!dma_pte_present(*pte)) if (!dma_pte_present(pte))
break; break;
parent = phys_to_virt(dma_pte_addr(*pte)); parent = phys_to_virt(dma_pte_addr(pte));
total--; total--;
} }
return NULL; return NULL;
@ -546,7 +571,7 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
pte = dma_addr_level_pte(domain, addr, 1); pte = dma_addr_level_pte(domain, addr, 1);
if (pte) { if (pte) {
dma_clear_pte(*pte); dma_clear_pte(pte);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
} }
} }
@ -593,8 +618,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
pte = dma_addr_level_pte(domain, tmp, level); pte = dma_addr_level_pte(domain, tmp, level);
if (pte) { if (pte) {
free_pgtable_page( free_pgtable_page(
phys_to_virt(dma_pte_addr(*pte))); phys_to_virt(dma_pte_addr(pte)));
dma_clear_pte(*pte); dma_clear_pte(pte);
__iommu_flush_cache(domain->iommu, __iommu_flush_cache(domain->iommu,
pte, sizeof(*pte)); pte, sizeof(*pte));
} }
@ -1421,9 +1446,9 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
/* We don't need lock here, nobody else /* We don't need lock here, nobody else
* touches the iova range * touches the iova range
*/ */
BUG_ON(dma_pte_addr(*pte)); BUG_ON(dma_pte_addr(pte));
dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
dma_set_pte_prot(*pte, prot); dma_set_pte_prot(pte, prot);
__iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
start_pfn++; start_pfn++;
index++; index++;
@ -2584,7 +2609,7 @@ u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova)
pte = addr_to_dma_pte(domain, iova); pte = addr_to_dma_pte(domain, iova);
if (pte) if (pte)
pfn = dma_pte_addr(*pte); pfn = dma_pte_addr(pte);
return pfn >> VTD_PAGE_SHIFT; return pfn >> VTD_PAGE_SHIFT;
} }