
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (33 commits)
  iommu/core: Remove global iommu_ops and register_iommu
  iommu/msm: Use bus_set_iommu instead of register_iommu
  iommu/omap: Use bus_set_iommu instead of register_iommu
  iommu/vt-d: Use bus_set_iommu instead of register_iommu
  iommu/amd: Use bus_set_iommu instead of register_iommu
  iommu/core: Use bus->iommu_ops in the iommu-api
  iommu/core: Convert iommu_found to iommu_present
  iommu/core: Add bus_type parameter to iommu_domain_alloc
  Driver core: Add iommu_ops to bus_type
  iommu/core: Define iommu_ops and register_iommu only with CONFIG_IOMMU_API
  iommu/amd: Fix wrong shift direction
  iommu/omap: always provide iommu debug code
  iommu/core: let drivers know if an iommu fault handler isn't installed
  iommu/core: export iommu_set_fault_handler()
  iommu/omap: Fix build error with !IOMMU_SUPPORT
  iommu/omap: Migrate to the generic fault report mechanism
  iommu/core: Add fault reporting mechanism
  iommu/core: Use PAGE_SIZE instead of hard-coded value
  iommu/core: use the existing IS_ALIGNED macro
  iommu/msm: ->unmap() should return order of unmapped page
  ...

Fixup trivial conflicts in drivers/iommu/Makefile: "move omap iommu to
dedicated iommu folder" vs "Rename the DMAR and INTR_REMAP config
options" just happened to touch lines next to each other.
Linus Torvalds 2011-10-30 15:46:19 -07:00
commit 0cfdc72439
28 changed files with 869 additions and 647 deletions
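The common thread in the series above is that the IOMMU API drops its single global iommu_ops: ops now hang off a bus_type, iommu_found() becomes iommu_present(bus), and iommu_domain_alloc() takes the bus as a parameter. A minimal driver-side sketch of the conversion ("foo" is a hypothetical driver, used only to illustrate the register_iommu -> bus_set_iommu change):

	/* hypothetical driver, illustration only */
	static struct iommu_ops foo_iommu_ops = {
		/* .domain_init, .attach_dev, .map, .unmap, ... */
	};

	static int __init foo_iommu_init(void)
	{
		/* before: register_iommu(&foo_iommu_ops); */
		/* after: the ops are tied to the bus the IOMMU serves */
		return bus_set_iommu(&pci_bus_type, &foo_iommu_ops);
	}

Note that bus_set_iommu() returns -EBUSY if the bus already has iommu_ops installed, where the old register_iommu() would simply BUG().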

arch/arm/mach-omap2/iommu2.c

@@ -66,7 +66,7 @@
 			((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
 
-static void __iommu_set_twl(struct iommu *obj, bool on)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
 {
 	u32 l = iommu_read_reg(obj, MMU_CNTL);
@@ -85,7 +85,7 @@ static void __iommu_set_twl(struct iommu *obj, bool on)
 }
 
-static int omap2_iommu_enable(struct iommu *obj)
+static int omap2_iommu_enable(struct omap_iommu *obj)
 {
 	u32 l, pa;
 	unsigned long timeout;
@@ -127,7 +127,7 @@ static int omap2_iommu_enable(struct iommu *obj)
 	return 0;
 }
 
-static void omap2_iommu_disable(struct iommu *obj)
+static void omap2_iommu_disable(struct omap_iommu *obj)
 {
 	u32 l = iommu_read_reg(obj, MMU_CNTL);
@@ -138,12 +138,12 @@ static void omap2_iommu_disable(struct iommu *obj)
 	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
 }
 
-static void omap2_iommu_set_twl(struct iommu *obj, bool on)
+static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
 {
 	__iommu_set_twl(obj, false);
 }
 
-static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
+static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
 {
 	u32 stat, da;
 	u32 errs = 0;
@@ -173,13 +173,13 @@ static u32 omap2_iommu_fault_isr(struct iommu *obj, u32 *ra)
 	return errs;
 }
 
-static void omap2_tlb_read_cr(struct iommu *obj, struct cr_regs *cr)
+static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
 	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
 	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
 }
 
-static void omap2_tlb_load_cr(struct iommu *obj, struct cr_regs *cr)
+static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 {
 	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
 	iommu_write_reg(obj, cr->ram, MMU_RAM);
@@ -193,7 +193,8 @@ static u32 omap2_cr_to_virt(struct cr_regs *cr)
 	return cr->cam & mask;
 }
 
-static struct cr_regs *omap2_alloc_cr(struct iommu *obj, struct iotlb_entry *e)
+static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
+						struct iotlb_entry *e)
 {
 	struct cr_regs *cr;
@@ -230,7 +231,8 @@ static u32 omap2_get_pte_attr(struct iotlb_entry *e)
 	return attr;
 }
 
-static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
+static ssize_t
+omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
 {
 	char *p = buf;
@@ -254,7 +256,8 @@ static ssize_t omap2_dump_cr(struct iommu *obj, struct cr_regs *cr, char *buf)
 		goto out;					\
 	} while (0)
 
-static ssize_t omap2_iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len)
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
 {
 	char *p = buf;
@@ -280,7 +283,7 @@ out:
 	return p - buf;
 }
 
-static void omap2_iommu_save_ctx(struct iommu *obj)
+static void omap2_iommu_save_ctx(struct omap_iommu *obj)
 {
 	int i;
 	u32 *p = obj->ctx;
@@ -293,7 +296,7 @@ static void omap2_iommu_save_ctx(struct iommu *obj)
 	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
 }
 
-static void omap2_iommu_restore_ctx(struct iommu *obj)
+static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
 {
 	int i;
 	u32 *p = obj->ctx;
@@ -343,13 +346,13 @@ static const struct iommu_functions omap2_iommu_ops = {
 
 static int __init omap2_iommu_init(void)
 {
-	return install_iommu_arch(&omap2_iommu_ops);
+	return omap_install_iommu_arch(&omap2_iommu_ops);
 }
module_init(omap2_iommu_init);
 
 static void __exit omap2_iommu_exit(void)
 {
-	uninstall_iommu_arch(&omap2_iommu_ops);
+	omap_uninstall_iommu_arch(&omap2_iommu_ops);
 }
 module_exit(omap2_iommu_exit);

arch/arm/plat-omap/Kconfig

@@ -134,18 +134,6 @@ config OMAP_MBOX_KFIFO_SIZE
 	  This can also be changed at runtime (via the mbox_kfifo_size
 	  module parameter).
 
-config OMAP_IOMMU
-	tristate
-
-config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
-	depends on OMAP_IOMMU && DEBUG_FS
-	help
-	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU in debugfs.
-	  Say N unless you know you need this.
-
 config OMAP_IOMMU_IVA2
 	bool

arch/arm/plat-omap/Makefile

@@ -18,8 +18,6 @@ obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
 obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
-obj-$(CONFIG_OMAP_IOMMU) += iommu.o iovmm.o
-obj-$(CONFIG_OMAP_IOMMU_DEBUG) += iommu-debug.o
 
 obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
 obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o

arch/arm/plat-omap/include/plat/iommu.h

@@ -25,16 +25,17 @@ struct iotlb_entry {
 	};
 };
 
-struct iommu {
+struct omap_iommu {
 	const char	*name;
 	struct module	*owner;
 	struct clk	*clk;
 	void __iomem	*regbase;
 	struct device	*dev;
 	void		*isr_priv;
+	struct iommu_domain *domain;
 
 	unsigned int	refcount;
-	struct mutex	iommu_lock;	/* global for this whole object */
+	spinlock_t	iommu_lock;	/* global for this whole object */
 
 	/*
 	 * We don't change iopgd for a situation like pgd for a task,
@@ -48,8 +49,6 @@ struct iommu {
 	struct list_head	mmap;
 	struct mutex		mmap_lock; /* protect mmap */
 
-	int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs, void *priv);
-
 	void *ctx; /* iommu context: registres saved area */
 	u32 da_start;
 	u32 da_end;
@@ -81,25 +80,27 @@ struct iotlb_lock {
 struct iommu_functions {
 	unsigned long	version;
 
-	int (*enable)(struct iommu *obj);
-	void (*disable)(struct iommu *obj);
-	void (*set_twl)(struct iommu *obj, bool on);
-	u32 (*fault_isr)(struct iommu *obj, u32 *ra);
+	int (*enable)(struct omap_iommu *obj);
+	void (*disable)(struct omap_iommu *obj);
+	void (*set_twl)(struct omap_iommu *obj, bool on);
+	u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
 
-	void (*tlb_read_cr)(struct iommu *obj, struct cr_regs *cr);
-	void (*tlb_load_cr)(struct iommu *obj, struct cr_regs *cr);
+	void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
+	void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
 
-	struct cr_regs *(*alloc_cr)(struct iommu *obj, struct iotlb_entry *e);
+	struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
+						struct iotlb_entry *e);
 	int (*cr_valid)(struct cr_regs *cr);
 	u32 (*cr_to_virt)(struct cr_regs *cr);
 	void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
-	ssize_t (*dump_cr)(struct iommu *obj, struct cr_regs *cr, char *buf);
+	ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
+							char *buf);
 	u32 (*get_pte_attr)(struct iotlb_entry *e);
 
-	void (*save_ctx)(struct iommu *obj);
-	void (*restore_ctx)(struct iommu *obj);
-	ssize_t (*dump_ctx)(struct iommu *obj, char *buf, ssize_t len);
+	void (*save_ctx)(struct omap_iommu *obj);
+	void (*restore_ctx)(struct omap_iommu *obj);
+	ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
 };
 
 struct iommu_platform_data {
@@ -150,40 +151,31 @@ struct iommu_platform_data {
 /*
  * global functions
  */
-extern u32 iommu_arch_version(void);
-extern void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-extern u32 iotlb_cr_to_virt(struct cr_regs *cr);
-extern int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iommu_set_twl(struct iommu *obj, bool on);
-extern void flush_iotlb_page(struct iommu *obj, u32 da);
-extern void flush_iotlb_range(struct iommu *obj, u32 start, u32 end);
-extern void flush_iotlb_all(struct iommu *obj);
-extern int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e);
-extern void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd,
-				   u32 **ppte);
-extern size_t iopgtable_clear_entry(struct iommu *obj, u32 iova);
-extern int iommu_set_da_range(struct iommu *obj, u32 start, u32 end);
-extern struct iommu *iommu_get(const char *name);
-extern void iommu_put(struct iommu *obj);
-extern int iommu_set_isr(const char *name,
-		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
+extern u32 omap_iommu_arch_version(void);
+extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
+extern int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
+extern int omap_iommu_set_isr(const char *name,
+		int (*isr)(struct omap_iommu *obj, u32 da, u32 iommu_errs,
 				    void *priv),
 			 void *isr_priv);
-extern void iommu_save_ctx(struct iommu *obj);
-extern void iommu_restore_ctx(struct iommu *obj);
-extern int install_iommu_arch(const struct iommu_functions *ops);
-extern void uninstall_iommu_arch(const struct iommu_functions *ops);
-extern int foreach_iommu_device(void *data,
+extern void omap_iommu_save_ctx(struct omap_iommu *obj);
+extern void omap_iommu_restore_ctx(struct omap_iommu *obj);
+extern int omap_install_iommu_arch(const struct iommu_functions *ops);
+extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
+extern int omap_foreach_iommu_device(void *data,
 				int (*fn)(struct device *, void *));
-extern ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t len);
-extern size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t len);
+extern ssize_t
+omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
+extern size_t
+omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+struct device *omap_find_iommu_device(const char *name);
 
 #endif /* __MACH_IOMMU_H */

arch/arm/plat-omap/include/plat/iommu2.h

@@ -83,12 +83,12 @@
 /*
  * register accessors
  */
-static inline u32 iommu_read_reg(struct iommu *obj, size_t offs)
+static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
 {
 	return __raw_readl(obj->regbase + offs);
 }
 
-static inline void iommu_write_reg(struct iommu *obj, u32 val, size_t offs)
+static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
 {
 	__raw_writel(val, obj->regbase + offs);
 }

arch/arm/plat-omap/include/plat/iopgtable.h

@@ -56,6 +56,19 @@
 #define IOPAGE_MASK		IOPTE_MASK
 
+/**
+ * omap_iommu_translate() - va to pa translation
+ * @d:		omap iommu descriptor
+ * @va:		virtual address
+ * @mask:	omap iommu descriptor mask
+ *
+ * va to pa translation
+ */
+static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+{
+	return (d & mask) | (va & (~mask));
+}
+
 /*
  * some descriptor attributes.
  */
@@ -64,10 +77,15 @@
 #define IOPGD_SUPER		(1 << 18 | 2 << 0)
 
 #define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
+#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
+#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
 #define IOPTE_SMALL	(2 << 0)
 #define IOPTE_LARGE	(1 << 0)
+#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
+#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
 
 /* to find an entry in a page-table-directory */
 #define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
 #define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))
@@ -97,6 +115,6 @@ static inline u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa,
 }
 
 #define to_iommu(dev)							\
-	(struct iommu *)platform_get_drvdata(to_platform_device(dev))
+	(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
 
 #endif /* __PLAT_OMAP_IOMMU_H */
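The new omap_iommu_translate() helper keeps the frame bits selected by mask from the descriptor and takes the in-page offset bits from the virtual address. A worked example with made-up values, assuming a 4 KiB small-page entry (mask 0xfffff000):

	u32 d    = 0x8765f002;		/* descriptor: frame 0x8765f000 + attribute bits */
	u32 va   = 0x40001234;		/* device virtual address */
	u32 mask = 0xfffff000;		/* 4 KiB page */

	/* (d & mask) | (va & ~mask) == 0x8765f000 | 0x00000234 == 0x8765f234 */
	phys_addr_t pa = omap_iommu_translate(d, va, mask);

Passing the per-page-size mask (rather than a fixed PAGE_MASK) is what strips the attribute bits held in the low bits of the descriptor.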

arch/arm/plat-omap/include/plat/iovmm.h

@@ -13,8 +13,10 @@
 #ifndef __IOMMU_MMAP_H
 #define __IOMMU_MMAP_H
 
+#include <linux/iommu.h>
+
 struct iovm_struct {
-	struct iommu		*iommu;	/* iommu object which this belongs to */
+	struct omap_iommu	*iommu;	/* iommu object which this belongs to */
 	u32			da_start; /* area definition */
 	u32			da_end;
 	u32			flags; /* IOVMF_: see below */
@@ -70,20 +72,18 @@ struct iovm_struct {
 #define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))
 
-extern struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da);
-extern u32 iommu_vmap(struct iommu *obj, u32 da,
+extern struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da);
+extern u32
+omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
 			const struct sg_table *sgt, u32 flags);
-extern struct sg_table *iommu_vunmap(struct iommu *obj, u32 da);
-extern u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes,
-				u32 flags);
-extern void iommu_vfree(struct iommu *obj, const u32 da);
-extern u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
-				u32 flags);
-extern void iommu_kunmap(struct iommu *obj, u32 da);
-extern u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes,
-				u32 flags);
-extern void iommu_kfree(struct iommu *obj, u32 da);
-extern void *da_to_va(struct iommu *obj, u32 da);
+extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
+				struct omap_iommu *obj, u32 da);
+extern u32
+omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj,
+				u32 da, size_t bytes, u32 flags);
+extern void
+omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+				const u32 da);
+extern void *omap_da_to_va(struct omap_iommu *obj, u32 da);
 
 #endif /* __IOMMU_MMAP_H */

arch/ia64/kvm/kvm-ia64.c

@@ -33,6 +33,7 @@
 #include <linux/uaccess.h>
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/pci.h>
 
 #include <asm/pgtable.h>
 #include <asm/gcc_intrin.h>
@@ -204,7 +205,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_IOMMU:
-		r = iommu_found();
+		r = iommu_present(&pci_bus_type);
 		break;
 	default:
 		r = 0;

arch/x86/kvm/x86.c

@@ -44,6 +44,7 @@
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
 #include <linux/hash.h>
+#include <linux/pci.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -2123,7 +2124,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = 0;
 		break;
 	case KVM_CAP_IOMMU:
-		r = iommu_found();
+		r = iommu_present(&pci_bus_type);
 		break;
 	case KVM_CAP_MCE:
 		r = KVM_MAX_MCE_BANKS;

drivers/iommu/Kconfig

@@ -112,4 +112,23 @@ config IRQ_REMAP
 	  To use x2apic mode in the CPU's which support x2APIC enhancements or
 	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
 
+# OMAP IOMMU support
+config OMAP_IOMMU
+	bool "OMAP IOMMU Support"
+	depends on ARCH_OMAP
+	select IOMMU_API
+
+config OMAP_IOVMM
+	tristate "OMAP IO Virtual Memory Manager Support"
+	depends on OMAP_IOMMU
+
+config OMAP_IOMMU_DEBUG
+	tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
+	depends on OMAP_IOVMM && DEBUG_FS
+	help
+	  Select this to see extensive information about
+	  the internal state of OMAP IOMMU/IOVMM in debugfs.
+	  Say N unless you know you need this.
+
 endif # IOMMU_SUPPORT

drivers/iommu/Makefile

@@ -4,3 +4,6 @@ obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
+obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
+obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
+obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o

drivers/iommu/amd_iommu.c

@@ -1283,7 +1283,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
 
-		dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
 	}
 
 	update_domain(&dma_dom->domain);
@@ -2495,7 +2495,7 @@ static unsigned device_dma_ops_init(void)
 
 void __init amd_iommu_init_api(void)
 {
-	register_iommu(&amd_iommu_ops);
+	bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
 }
 
 int __init amd_iommu_init_dma_ops(void)

drivers/iommu/intel-iommu.c

@@ -3642,7 +3642,7 @@ int __init intel_iommu_init(void)
 
 	init_iommu_pm_ops();
 
-	register_iommu(&intel_iommu_ops);
+	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
 
 	bus_register_notifier(&pci_bus_type, &device_nb);

drivers/iommu/iommu.c

@@ -16,6 +16,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include <linux/device.h>
+#include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/types.h>
 #include <linux/module.h>
@@ -23,32 +25,78 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 
-static struct iommu_ops *iommu_ops;
-
-void register_iommu(struct iommu_ops *ops)
+static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
-	if (iommu_ops)
-		BUG();
-
-	iommu_ops = ops;
 }
 
-bool iommu_found(void)
+/**
+ * bus_set_iommu - set iommu-callbacks for the bus
+ * @bus: bus.
+ * @ops: the callbacks provided by the iommu-driver
+ *
+ * This function is called by an iommu driver to set the iommu methods
+ * used for a particular bus. Drivers for devices on that bus can use
+ * the iommu-api after these ops are registered.
+ * This special function is needed because IOMMUs are usually devices on
+ * the bus itself, so the iommu drivers are not initialized when the bus
+ * is set up. With this function the iommu-driver can set the iommu-ops
+ * afterwards.
+ */
+int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
 {
-	return iommu_ops != NULL;
+	if (bus->iommu_ops != NULL)
+		return -EBUSY;
+
+	bus->iommu_ops = ops;
+
+	/* Do IOMMU specific setup for this bus-type */
+	iommu_bus_init(bus, ops);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(iommu_found);
+EXPORT_SYMBOL_GPL(bus_set_iommu);
 
-struct iommu_domain *iommu_domain_alloc(void)
+bool iommu_present(struct bus_type *bus)
+{
+	return bus->iommu_ops != NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_present);
+
+/**
+ * iommu_set_fault_handler() - set a fault handler for an iommu domain
+ * @domain: iommu domain
+ * @handler: fault handler
+ *
+ * This function should be used by IOMMU users which want to be notified
+ * whenever an IOMMU fault happens.
+ *
+ * The fault handler itself should return 0 on success, and an appropriate
+ * error code otherwise.
+ */
+void iommu_set_fault_handler(struct iommu_domain *domain,
+					iommu_fault_handler_t handler)
+{
+	BUG_ON(!domain);
+
+	domain->handler = handler;
+}
+EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
+
+struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
 	struct iommu_domain *domain;
 	int ret;
 
+	if (bus == NULL || bus->iommu_ops == NULL)
+		return NULL;
+
 	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
-	ret = iommu_ops->domain_init(domain);
+	domain->ops = bus->iommu_ops;
+
+	ret = domain->ops->domain_init(domain);
 	if (ret)
 		goto out_free;
@@ -63,62 +111,78 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
-	iommu_ops->domain_destroy(domain);
+	if (likely(domain->ops->domain_destroy != NULL))
+		domain->ops->domain_destroy(domain);
+
 	kfree(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
-	return iommu_ops->attach_dev(domain, dev);
+	if (unlikely(domain->ops->attach_dev == NULL))
+		return -ENODEV;
+
+	return domain->ops->attach_dev(domain, dev);
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
 
 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 {
-	iommu_ops->detach_dev(domain, dev);
+	if (unlikely(domain->ops->detach_dev == NULL))
+		return;
+
+	domain->ops->detach_dev(domain, dev);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 			       unsigned long iova)
 {
-	return iommu_ops->iova_to_phys(domain, iova);
+	if (unlikely(domain->ops->iova_to_phys == NULL))
+		return 0;
+
+	return domain->ops->iova_to_phys(domain, iova);
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
 int iommu_domain_has_cap(struct iommu_domain *domain,
 			 unsigned long cap)
 {
-	return iommu_ops->domain_has_cap(domain, cap);
+	if (unlikely(domain->ops->domain_has_cap == NULL))
+		return 0;
+
+	return domain->ops->domain_has_cap(domain, cap);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
 	      phys_addr_t paddr, int gfp_order, int prot)
 {
-	unsigned long invalid_mask;
 	size_t size;
 
-	size         = 0x1000UL << gfp_order;
-	invalid_mask = size - 1;
-
-	BUG_ON((iova | paddr) & invalid_mask);
-
-	return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
+	if (unlikely(domain->ops->map == NULL))
+		return -ENODEV;
+
+	size = PAGE_SIZE << gfp_order;
+
+	BUG_ON(!IS_ALIGNED(iova | paddr, size));
+
+	return domain->ops->map(domain, iova, paddr, gfp_order, prot);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
 int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
 {
-	unsigned long invalid_mask;
 	size_t size;
 
-	size         = 0x1000UL << gfp_order;
-	invalid_mask = size - 1;
-
-	BUG_ON(iova & invalid_mask);
-
-	return iommu_ops->unmap(domain, iova, gfp_order);
+	if (unlikely(domain->ops->unmap == NULL))
+		return -ENODEV;
+
+	size = PAGE_SIZE << gfp_order;
+
+	BUG_ON(!IS_ALIGNED(iova, size));
+
+	return domain->ops->unmap(domain, iova, gfp_order);
 }
 EXPORT_SYMBOL_GPL(iommu_unmap);
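From a consumer's point of view, the reworked core API is used roughly like this (a hedged sketch with error handling trimmed; my_fault_handler is hypothetical, and the PCI bus is chosen to match the KVM hunks above):

	struct iommu_domain *domain;
	int err;

	if (!iommu_present(&pci_bus_type))		/* was: iommu_found() */
		return -ENODEV;

	domain = iommu_domain_alloc(&pci_bus_type);	/* was: iommu_domain_alloc(void) */
	if (!domain)
		return -ENOMEM;

	iommu_set_fault_handler(domain, my_fault_handler);	/* optional */

	err = iommu_attach_device(domain, dev);

Every subsequent call dispatches through domain->ops, so the ops are fixed by the bus the domain was allocated for.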

drivers/iommu/msm_iommu.c

@@ -543,6 +543,13 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	}
 
 	ret = __flush_iotlb(domain);
+
+	/*
+	 * the IOMMU API requires us to return the order of the unmapped
+	 * page (on success).
+	 */
+	if (!ret)
+		ret = order;
 fail:
 	spin_unlock_irqrestore(&msm_iommu_lock, flags);
 	return ret;
@@ -721,7 +728,7 @@ static void __init setup_iommu_tex_classes(void)
 static int __init msm_iommu_init(void)
 {
 	setup_iommu_tex_classes();
-	register_iommu(&msm_iommu_ops);
+	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
 	return 0;
 }
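The first hunk implements the contract named in its comment: on success, a driver's ->unmap() (and therefore iommu_unmap()) returns the order of the page it actually unmapped. A hedged caller-side sketch:

	int order;
	size_t unmapped;

	order = iommu_unmap(domain, iova, 0);	/* ask for one minimal page */
	if (order < 0)
		return order;			/* error code from the driver */
	unmapped = PAGE_SIZE << order;		/* the driver may have unmapped a larger page */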

drivers/iommu/omap-iommu-debug.c

@@ -21,7 +21,7 @@
 #include <plat/iommu.h>
 #include <plat/iovmm.h>
 
-#include "iopgtable.h"
+#include <plat/iopgtable.h>
 
 #define MAXCOLUMN 100	/* for short messages */
@@ -32,7 +32,7 @@ static struct dentry *iommu_debug_root;
 static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	u32 ver = iommu_arch_version();
+	u32 ver = omap_iommu_arch_version();
 	char buf[MAXCOLUMN], *p = buf;
 
 	p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
@@ -43,7 +43,7 @@ static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
 static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	ssize_t bytes;
@@ -54,7 +54,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 
 	mutex_lock(&iommu_debug_lock);
 
-	bytes = iommu_dump_ctx(obj, p, count);
+	bytes = omap_iommu_dump_ctx(obj, p, count);
 	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
 
 	mutex_unlock(&iommu_debug_lock);
@@ -66,7 +66,7 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
 static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	ssize_t bytes, rest;
@@ -80,7 +80,7 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
 	p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
 	p += sprintf(p, "-----------------------------------------\n");
 	rest = count - (p - buf);
-	p += dump_tlb_entries(obj, p, rest);
+	p += omap_dump_tlb_entries(obj, p, rest);
 
 	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
@@ -96,7 +96,7 @@ static ssize_t debug_write_pagetable(struct file *file,
 	struct iotlb_entry e;
 	struct cr_regs cr;
 	int err;
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char buf[MAXCOLUMN], *p = buf;
 
 	count = min(count, sizeof(buf));
@@ -113,8 +113,8 @@ static ssize_t debug_write_pagetable(struct file *file,
 		return -EINVAL;
 	}
 
-	iotlb_cr_to_e(&cr, &e);
-	err = iopgtable_store_entry(obj, &e);
+	omap_iotlb_cr_to_e(&cr, &e);
+	err = omap_iopgtable_store_entry(obj, &e);
 	if (err)
 		dev_err(obj->dev, "%s: fail to store cr\n", __func__);
@@ -136,7 +136,7 @@ static ssize_t debug_write_pagetable(struct file *file,
 		__err;						\
 	})
 
-static ssize_t dump_ioptable(struct iommu *obj, char *buf, ssize_t len)
+static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
 {
 	int i;
 	u32 *iopgd;
@@ -183,7 +183,7 @@ out:
 static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
 				    size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	size_t bytes;
@@ -211,7 +211,7 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
 static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	struct iovm_struct *tmp;
 	int uninitialized_var(i);
@@ -253,7 +253,7 @@ static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
 static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	char *p, *buf;
 	struct iovm_struct *area;
 	ssize_t bytes;
@@ -267,7 +267,7 @@ static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
 
 	mutex_lock(&iommu_debug_lock);
 
-	area = find_iovm_area(obj, (u32)ppos);
+	area = omap_find_iovm_area(obj, (u32)ppos);
 	if (IS_ERR(area)) {
 		bytes = -EINVAL;
 		goto err_out;
@@ -286,7 +286,7 @@ err_out:
 static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
 			       size_t count, loff_t *ppos)
 {
-	struct iommu *obj = file->private_data;
+	struct omap_iommu *obj = file->private_data;
 	struct iovm_struct *area;
 	char *p, *buf;
@@ -304,7 +304,7 @@ static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
 		goto err_out;
 	}
 
-	area = find_iovm_area(obj, (u32)ppos);
+	area = omap_find_iovm_area(obj, (u32)ppos);
 	if (IS_ERR(area)) {
 		count = -EINVAL;
 		goto err_out;
@@ -360,7 +360,7 @@ DEBUG_FOPS(mem);
 static int iommu_debug_register(struct device *dev, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
-	struct iommu *obj = platform_get_drvdata(pdev);
+	struct omap_iommu *obj = platform_get_drvdata(pdev);
 	struct dentry *d, *parent;
 
 	if (!obj || !obj->dev)
@@ -396,7 +396,7 @@ static int __init iommu_debug_init(void)
 		return -ENOMEM;
 	iommu_debug_root = d;
 
-	err = foreach_iommu_device(d, iommu_debug_register);
+	err = omap_foreach_iommu_device(d, iommu_debug_register);
 	if (err)
 		goto err_out;
 	return 0;

drivers/iommu/omap-iommu.c

@ -18,18 +18,34 @@
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <plat/iommu.h> #include <plat/iommu.h>
#include "iopgtable.h" #include <plat/iopgtable.h>
#define for_each_iotlb_cr(obj, n, __i, cr) \ #define for_each_iotlb_cr(obj, n, __i, cr) \
for (__i = 0; \ for (__i = 0; \
(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \ (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
__i++) __i++)
/**
* struct omap_iommu_domain - omap iommu domain
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
spinlock_t lock;
};
/* accommodate the difference between omap1 and omap2/3 */ /* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu; static const struct iommu_functions *arch_iommu;
@ -37,13 +53,13 @@ static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep; static struct kmem_cache *iopte_cachep;
/** /**
* install_iommu_arch - Install archtecure specific iommu functions * omap_install_iommu_arch - Install archtecure specific iommu functions
* @ops: a pointer to architecture specific iommu functions * @ops: a pointer to architecture specific iommu functions
* *
* There are several kind of iommu algorithm(tlb, pagetable) among * There are several kind of iommu algorithm(tlb, pagetable) among
* omap series. This interface installs such an iommu algorighm. * omap series. This interface installs such an iommu algorighm.
**/ **/
int install_iommu_arch(const struct iommu_functions *ops) int omap_install_iommu_arch(const struct iommu_functions *ops)
{ {
if (arch_iommu) if (arch_iommu)
return -EBUSY; return -EBUSY;
@ -51,53 +67,53 @@ int install_iommu_arch(const struct iommu_functions *ops)
arch_iommu = ops; arch_iommu = ops;
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(install_iommu_arch); EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
/** /**
* uninstall_iommu_arch - Uninstall archtecure specific iommu functions * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
* @ops: a pointer to architecture specific iommu functions * @ops: a pointer to architecture specific iommu functions
* *
* This interface uninstalls the iommu algorighm installed previously. * This interface uninstalls the iommu algorighm installed previously.
**/ **/
void uninstall_iommu_arch(const struct iommu_functions *ops) void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{ {
if (arch_iommu != ops) if (arch_iommu != ops)
pr_err("%s: not your arch\n", __func__); pr_err("%s: not your arch\n", __func__);
arch_iommu = NULL; arch_iommu = NULL;
} }
EXPORT_SYMBOL_GPL(uninstall_iommu_arch); EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/** /**
* iommu_save_ctx - Save registers for pm off-mode support * omap_iommu_save_ctx - Save registers for pm off-mode support
* @obj: target iommu * @obj: target iommu
**/ **/
void iommu_save_ctx(struct iommu *obj) void omap_iommu_save_ctx(struct omap_iommu *obj)
{ {
arch_iommu->save_ctx(obj); arch_iommu->save_ctx(obj);
} }
EXPORT_SYMBOL_GPL(iommu_save_ctx); EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/** /**
* iommu_restore_ctx - Restore registers for pm off-mode support * omap_iommu_restore_ctx - Restore registers for pm off-mode support
* @obj: target iommu * @obj: target iommu
**/ **/
void iommu_restore_ctx(struct iommu *obj) void omap_iommu_restore_ctx(struct omap_iommu *obj)
{ {
arch_iommu->restore_ctx(obj); arch_iommu->restore_ctx(obj);
} }
EXPORT_SYMBOL_GPL(iommu_restore_ctx); EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
/** /**
* iommu_arch_version - Return running iommu arch version * omap_iommu_arch_version - Return running iommu arch version
**/ **/
u32 iommu_arch_version(void) u32 omap_iommu_arch_version(void)
{ {
return arch_iommu->version; return arch_iommu->version;
} }
EXPORT_SYMBOL_GPL(iommu_arch_version); EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct iommu *obj) static int iommu_enable(struct omap_iommu *obj)
{ {
int err; int err;
@ -115,7 +131,7 @@ static int iommu_enable(struct iommu *obj)
return err; return err;
} }
static void iommu_disable(struct iommu *obj) static void iommu_disable(struct omap_iommu *obj)
{ {
if (!obj) if (!obj)
return; return;
@ -130,13 +146,13 @@ static void iommu_disable(struct iommu *obj)
/* /*
* TLB operations * TLB operations
*/ */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e) void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{ {
BUG_ON(!cr || !e); BUG_ON(!cr || !e);
arch_iommu->cr_to_e(cr, e); arch_iommu->cr_to_e(cr, e);
} }
EXPORT_SYMBOL_GPL(iotlb_cr_to_e); EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
static inline int iotlb_cr_valid(struct cr_regs *cr) static inline int iotlb_cr_valid(struct cr_regs *cr)
{ {
@ -146,7 +162,7 @@ static inline int iotlb_cr_valid(struct cr_regs *cr)
return arch_iommu->cr_valid(cr); return arch_iommu->cr_valid(cr);
} }
static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj, static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
struct iotlb_entry *e) struct iotlb_entry *e)
{ {
if (!e) if (!e)
@ -155,23 +171,22 @@ static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
return arch_iommu->alloc_cr(obj, e); return arch_iommu->alloc_cr(obj, e);
} }
u32 iotlb_cr_to_virt(struct cr_regs *cr) static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{ {
return arch_iommu->cr_to_virt(cr); return arch_iommu->cr_to_virt(cr);
} }
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);
static u32 get_iopte_attr(struct iotlb_entry *e) static u32 get_iopte_attr(struct iotlb_entry *e)
{ {
return arch_iommu->get_pte_attr(e); return arch_iommu->get_pte_attr(e);
} }
static u32 iommu_report_fault(struct iommu *obj, u32 *da) static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{ {
return arch_iommu->fault_isr(obj, da); return arch_iommu->fault_isr(obj, da);
} }
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l) static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{ {
u32 val; u32 val;
@ -182,7 +197,7 @@ static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
} }
static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l) static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{ {
u32 val; u32 val;
@ -192,12 +207,12 @@ static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
iommu_write_reg(obj, val, MMU_LOCK); iommu_write_reg(obj, val, MMU_LOCK);
} }
static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr) static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{ {
arch_iommu->tlb_read_cr(obj, cr); arch_iommu->tlb_read_cr(obj, cr);
} }
static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr) static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{ {
arch_iommu->tlb_load_cr(obj, cr); arch_iommu->tlb_load_cr(obj, cr);
@ -211,7 +226,7 @@ static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
* @cr: contents of cam and ram register * @cr: contents of cam and ram register
* @buf: output buffer * @buf: output buffer
**/ **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr, static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
char *buf) char *buf)
{ {
BUG_ON(!cr || !buf); BUG_ON(!cr || !buf);
@ -220,7 +235,7 @@ static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
} }
/* only used in iotlb iteration for-loop */ /* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n) static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{ {
struct cr_regs cr; struct cr_regs cr;
struct iotlb_lock l; struct iotlb_lock l;
@ -238,7 +253,8 @@ static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
* @obj: target iommu * @obj: target iommu
* @e: an iommu tlb entry info * @e: an iommu tlb entry info
**/ **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e) #ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{ {
int err = 0; int err = 0;
struct iotlb_lock l; struct iotlb_lock l;
@ -294,7 +310,20 @@ out:
clk_disable(obj->clk); clk_disable(obj->clk);
return err; return err;
} }
EXPORT_SYMBOL_GPL(load_iotlb_entry);
#else /* !PREFETCH_IOTLB */
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
return 0;
}
#endif /* !PREFETCH_IOTLB */
static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
return load_iotlb_entry(obj, e);
}
/** /**
* flush_iotlb_page - Clear an iommu tlb entry * flush_iotlb_page - Clear an iommu tlb entry
@ -303,7 +332,7 @@ EXPORT_SYMBOL_GPL(load_iotlb_entry);
* *
* Clear an iommu tlb entry which includes 'da' address. * Clear an iommu tlb entry which includes 'da' address.
**/ **/
void flush_iotlb_page(struct iommu *obj, u32 da) static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{ {
int i; int i;
struct cr_regs cr; struct cr_regs cr;
@ -332,33 +361,12 @@ void flush_iotlb_page(struct iommu *obj, u32 da)
if (i == obj->nr_tlb_entries) if (i == obj->nr_tlb_entries)
dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da); dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
} }
EXPORT_SYMBOL_GPL(flush_iotlb_page);
/**
* flush_iotlb_range - Clear an iommu tlb entries
* @obj: target iommu
* @start: iommu device virtual address(start)
* @end: iommu device virtual address(end)
*
* Clear an iommu tlb entry which includes 'da' address.
**/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
u32 da = start;
while (da < end) {
flush_iotlb_page(obj, da);
/* FIXME: Optimize for multiple page size */
da += IOPTE_SIZE;
}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);
/** /**
* flush_iotlb_all - Clear all iommu tlb entries * flush_iotlb_all - Clear all iommu tlb entries
* @obj: target iommu * @obj: target iommu
**/ **/
void flush_iotlb_all(struct iommu *obj) static void flush_iotlb_all(struct omap_iommu *obj)
{ {
struct iotlb_lock l; struct iotlb_lock l;
@ -372,28 +380,10 @@ void flush_iotlb_all(struct iommu *obj)
clk_disable(obj->clk); clk_disable(obj->clk);
} }
EXPORT_SYMBOL_GPL(flush_iotlb_all);
/** #if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
* iommu_set_twl - enable/disable table walking logic
* @obj: target iommu
* @on: enable/disable
*
* Function used to enable/disable TWL. If one wants to work
* exclusively with locked TLB entries and receive notifications
* for TLB miss then call this function to disable TWL.
*/
void iommu_set_twl(struct iommu *obj, bool on)
{
clk_enable(obj->clk);
arch_iommu->set_twl(obj, on);
clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);
#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{ {
if (!obj || !buf) if (!obj || !buf)
return -EINVAL; return -EINVAL;
@ -406,9 +396,10 @@ ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
return bytes; return bytes;
} }
EXPORT_SYMBOL_GPL(iommu_dump_ctx); EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num) static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{ {
int i; int i;
struct iotlb_lock saved; struct iotlb_lock saved;
@ -431,11 +422,11 @@ static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
} }
/** /**
* dump_tlb_entries - dump cr arrays to given buffer * omap_dump_tlb_entries - dump cr arrays to given buffer
* @obj: target iommu * @obj: target iommu
* @buf: output buffer * @buf: output buffer
**/ **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes) size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{ {
int i, num; int i, num;
struct cr_regs *cr; struct cr_regs *cr;
@ -455,14 +446,14 @@ size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
return p - buf; return p - buf;
} }
EXPORT_SYMBOL_GPL(dump_tlb_entries); EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
int foreach_iommu_device(void *data, int (*fn)(struct device *, void *)) int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{ {
return driver_for_each_device(&omap_iommu_driver.driver, return driver_for_each_device(&omap_iommu_driver.driver,
NULL, data, fn); NULL, data, fn);
} }
EXPORT_SYMBOL_GPL(foreach_iommu_device); EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ #endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
@ -495,7 +486,7 @@ static void iopte_free(u32 *iopte)
kmem_cache_free(iopte_cachep, iopte); kmem_cache_free(iopte_cachep, iopte);
} }
static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da) static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{ {
u32 *iopte; u32 *iopte;
@ -533,7 +524,7 @@ pte_ready:
return iopte; return iopte;
} }
static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot) static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
@ -548,7 +539,7 @@ static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
return 0; return 0;
} }
static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot) static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
int i; int i;
@ -565,7 +556,7 @@ static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
return 0; return 0;
} }
static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot) static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
u32 *iopte = iopte_alloc(obj, iopgd, da); u32 *iopte = iopte_alloc(obj, iopgd, da);
@ -582,7 +573,7 @@ static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
return 0; return 0;
} }
static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot) static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{ {
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
u32 *iopte = iopte_alloc(obj, iopgd, da); u32 *iopte = iopte_alloc(obj, iopgd, da);
@ -603,9 +594,10 @@ static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
return 0; return 0;
} }
static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e) static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{ {
int (*fn)(struct iommu *, u32, u32, u32); int (*fn)(struct omap_iommu *, u32, u32, u32);
u32 prot; u32 prot;
int err; int err;
@ -641,23 +633,21 @@ static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
} }
/** /**
* iopgtable_store_entry - Make an iommu pte entry * omap_iopgtable_store_entry - Make an iommu pte entry
* @obj: target iommu * @obj: target iommu
* @e: an iommu tlb entry info * @e: an iommu tlb entry info
**/ **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e) int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{ {
int err; int err;
flush_iotlb_page(obj, e->da); flush_iotlb_page(obj, e->da);
err = iopgtable_store_entry_core(obj, e); err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
if (!err) if (!err)
load_iotlb_entry(obj, e); prefetch_iotlb_entry(obj, e);
#endif
return err; return err;
} }
EXPORT_SYMBOL_GPL(iopgtable_store_entry); EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/** /**
* iopgtable_lookup_entry - Lookup an iommu pte entry * iopgtable_lookup_entry - Lookup an iommu pte entry
@ -666,7 +656,8 @@ EXPORT_SYMBOL_GPL(iopgtable_store_entry);
* @ppgd: iommu pgd entry pointer to be returned * @ppgd: iommu pgd entry pointer to be returned
* @ppte: iommu pte entry pointer to be returned * @ppte: iommu pte entry pointer to be returned
**/ **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte) static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{ {
u32 *iopgd, *iopte = NULL; u32 *iopgd, *iopte = NULL;
@ -680,9 +671,8 @@ out:
*ppgd = iopgd; *ppgd = iopgd;
*ppte = iopte; *ppte = iopte;
} }
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);
static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da) static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{ {
size_t bytes; size_t bytes;
u32 *iopgd = iopgd_offset(obj, da); u32 *iopgd = iopgd_offset(obj, da);
@ -735,7 +725,7 @@ out:
* @obj: target iommu * @obj: target iommu
* @da: iommu device virtual address * @da: iommu device virtual address
**/ **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da) static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{ {
size_t bytes; size_t bytes;
@ -748,9 +738,8 @@ size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
return bytes; return bytes;
} }
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);
static void iopgtable_clear_entry_all(struct iommu *obj) static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{ {
int i; int i;
@ -785,7 +774,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
{ {
u32 da, errs; u32 da, errs;
u32 *iopgd, *iopte; u32 *iopgd, *iopte;
struct iommu *obj = data; struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
if (!obj->refcount) if (!obj->refcount)
return IRQ_NONE; return IRQ_NONE;
@ -797,7 +787,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
return IRQ_HANDLED; return IRQ_HANDLED;
/* Fault callback or TLB/PTE Dynamic loading */ /* Fault callback or TLB/PTE Dynamic loading */
if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv)) if (!report_iommu_fault(domain, obj->dev, da, 0))
return IRQ_HANDLED; return IRQ_HANDLED;
iommu_disable(obj); iommu_disable(obj);
@ -821,7 +811,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
static int device_match_by_alias(struct device *dev, void *data) static int device_match_by_alias(struct device *dev, void *data)
{ {
struct iommu *obj = to_iommu(dev); struct omap_iommu *obj = to_iommu(dev);
const char *name = data; const char *name = data;
pr_debug("%s: %s %s\n", __func__, obj->name, name); pr_debug("%s: %s %s\n", __func__, obj->name, name);
@ -830,57 +820,55 @@ static int device_match_by_alias(struct device *dev, void *data)
} }
/** /**
* iommu_set_da_range - Set a valid device address range * omap_find_iommu_device() - find an omap iommu device by name
* @obj: target iommu * @name: name of the iommu device
* @start Start of valid range *
* @end End of valid range * The generic iommu API requires the caller to provide the device
**/ * he wishes to attach to a certain iommu domain.
int iommu_set_da_range(struct iommu *obj, u32 start, u32 end) *
* Drivers generally should not bother with this as it should just
* be taken care of by the DMA-API using dev_archdata.
*
* This function is provided as an interim solution until the latter
* materializes, and omap3isp is fully migrated to the DMA-API.
*/
struct device *omap_find_iommu_device(const char *name)
{ {
return driver_find_device(&omap_iommu_driver.driver, NULL,
if (!obj) (void *)name,
return -EFAULT; device_match_by_alias);
if (end < start || !PAGE_ALIGN(start | end))
return -EINVAL;
obj->da_start = start;
obj->da_end = end;
return 0;
} }
EXPORT_SYMBOL_GPL(iommu_set_da_range); EXPORT_SYMBOL_GPL(omap_find_iommu_device);
/** /**
* iommu_get - Get iommu handler * omap_iommu_attach() - attach iommu device to an iommu domain
* @name: target iommu name * @dev: target omap iommu device
* @iopgd: page table
**/ **/
struct iommu *iommu_get(const char *name) static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{ {
int err = -ENOMEM; int err = -ENOMEM;
struct device *dev; struct omap_iommu *obj = to_iommu(dev);
struct iommu *obj;
dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name, spin_lock(&obj->iommu_lock);
device_match_by_alias);
if (!dev)
return ERR_PTR(-ENODEV);
obj = to_iommu(dev); /* an iommu device can only be attached once */
if (++obj->refcount > 1) {
mutex_lock(&obj->iommu_lock); dev_err(dev, "%s: already attached!\n", obj->name);
err = -EBUSY;
if (obj->refcount++ == 0) { goto err_enable;
err = iommu_enable(obj);
if (err)
goto err_enable;
flush_iotlb_all(obj);
} }
obj->iopgd = iopgd;
err = iommu_enable(obj);
if (err)
goto err_enable;
flush_iotlb_all(obj);
if (!try_module_get(obj->owner)) if (!try_module_get(obj->owner))
goto err_module; goto err_module;
mutex_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
return obj; return obj;
@ -890,59 +878,32 @@ err_module:
iommu_disable(obj); iommu_disable(obj);
err_enable: err_enable:
obj->refcount--; obj->refcount--;
mutex_unlock(&obj->iommu_lock); spin_unlock(&obj->iommu_lock);
return ERR_PTR(err); return ERR_PTR(err);
} }
EXPORT_SYMBOL_GPL(iommu_get);
/** /**
* iommu_put - Put back iommu handler * omap_iommu_detach - release iommu device
* @obj: target iommu * @obj: target iommu
**/ **/
void iommu_put(struct iommu *obj) static void omap_iommu_detach(struct omap_iommu *obj)
{ {
if (!obj || IS_ERR(obj)) if (!obj || IS_ERR(obj))
return; return;
mutex_lock(&obj->iommu_lock); spin_lock(&obj->iommu_lock);
if (--obj->refcount == 0) if (--obj->refcount == 0)
iommu_disable(obj); iommu_disable(obj);
module_put(obj->owner); module_put(obj->owner);
mutex_unlock(&obj->iommu_lock); obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock);
dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
} }
EXPORT_SYMBOL_GPL(iommu_put);
int iommu_set_isr(const char *name,
int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
void *priv),
void *isr_priv)
{
struct device *dev;
struct iommu *obj;
dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
device_match_by_alias);
if (!dev)
return -ENODEV;
obj = to_iommu(dev);
mutex_lock(&obj->iommu_lock);
if (obj->refcount != 0) {
mutex_unlock(&obj->iommu_lock);
return -EBUSY;
}
obj->isr = isr;
obj->isr_priv = isr_priv;
mutex_unlock(&obj->iommu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);
/* /*
* OMAP Device MMU(IOMMU) detection * OMAP Device MMU(IOMMU) detection
@ -950,9 +911,8 @@ EXPORT_SYMBOL_GPL(iommu_set_isr);
static int __devinit omap_iommu_probe(struct platform_device *pdev) static int __devinit omap_iommu_probe(struct platform_device *pdev)
{ {
int err = -ENODEV; int err = -ENODEV;
void *p;
int irq; int irq;
struct iommu *obj; struct omap_iommu *obj;
struct resource *res; struct resource *res;
struct iommu_platform_data *pdata = pdev->dev.platform_data; struct iommu_platform_data *pdata = pdev->dev.platform_data;
@ -974,7 +934,7 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
obj->da_start = pdata->da_start; obj->da_start = pdata->da_start;
obj->da_end = pdata->da_end; obj->da_end = pdata->da_end;
mutex_init(&obj->iommu_lock); spin_lock_init(&obj->iommu_lock);
mutex_init(&obj->mmap_lock); mutex_init(&obj->mmap_lock);
spin_lock_init(&obj->page_table_lock); spin_lock_init(&obj->page_table_lock);
INIT_LIST_HEAD(&obj->mmap); INIT_LIST_HEAD(&obj->mmap);
@@ -1009,22 +969,9 @@ static int __devinit omap_iommu_probe(struct platform_device *pdev)
goto err_irq; goto err_irq;
platform_set_drvdata(pdev, obj); platform_set_drvdata(pdev, obj);
p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
if (!p) {
err = -ENOMEM;
goto err_pgd;
}
memset(p, 0, IOPGD_TABLE_SIZE);
clean_dcache_area(p, IOPGD_TABLE_SIZE);
obj->iopgd = p;
BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));
dev_info(&pdev->dev, "%s registered\n", obj->name); dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0; return 0;
err_pgd:
free_irq(irq, obj);
err_irq: err_irq:
iounmap(obj->regbase); iounmap(obj->regbase);
err_ioremap: err_ioremap:
@@ -1040,12 +987,11 @@ static int __devexit omap_iommu_remove(struct platform_device *pdev)
{ {
int irq; int irq;
struct resource *res; struct resource *res;
struct iommu *obj = platform_get_drvdata(pdev); struct omap_iommu *obj = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL); platform_set_drvdata(pdev, NULL);
iopgtable_clear_entry_all(obj); iopgtable_clear_entry_all(obj);
free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
free_irq(irq, obj); free_irq(irq, obj);
@@ -1072,6 +1018,201 @@ static void iopte_cachep_ctor(void *iopte)
clean_dcache_area(iopte, IOPTE_TABLE_SIZE); clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
} }
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, int order, int prot)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
size_t bytes = PAGE_SIZE << order;
struct iotlb_entry e;
int omap_pgsz;
u32 ret, flags;
/* we only support mapping a single iommu page for now */
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
dev_err(dev, "invalid size to map: %d\n", bytes);
return -EINVAL;
}
dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
flags = omap_pgsz | prot;
iotlb_init_entry(&e, da, pa, flags);
ret = omap_iopgtable_store_entry(oiommu, &e);
if (ret)
dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);
return ret;
}
static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
int order)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
size_t unmap_size;
dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
unmap_size = iopgtable_clear_entry(oiommu, da);
return unmap_size ? get_order(unmap_size) : -EINVAL;
}
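The generic ->map()/->unmap() callbacks speak in page orders rather than byte counts: a request of order n covers PAGE_SIZE << n bytes, and unmap reports back get_order() of the bytes it actually cleared. A standalone sketch of that arithmetic in plain C, assuming 4 KiB pages (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* bytes covered by a mapping of a given order: PAGE_SIZE << order */
static unsigned long order_to_bytes(int order)
{
	return PAGE_SIZE << order;
}

/* smallest order whose size covers 'bytes' (mirrors get_order()) */
static int bytes_to_order(unsigned long bytes)
{
	unsigned long size = PAGE_SIZE;
	int order = 0;

	while (size < bytes) {
		size <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* an OMAP 64 KiB "large" iommu page is order 4: 4K << 4 == 64K */
	printf("order 4 -> %lu bytes\n", order_to_bytes(4));
	printf("65536 bytes -> order %d\n", bytes_to_order(65536));
	return 0;
}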
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu;
int ret = 0;
spin_lock(&omap_domain->lock);
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev) {
dev_err(dev, "iommu domain is already attached\n");
ret = -EBUSY;
goto out;
}
/* get a handle to and enable the omap iommu */
oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
if (IS_ERR(oiommu)) {
ret = PTR_ERR(oiommu);
dev_err(dev, "can't get omap iommu: %d\n", ret);
goto out;
}
omap_domain->iommu_dev = oiommu;
oiommu->domain = domain;
out:
spin_unlock(&omap_domain->lock);
return ret;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = to_iommu(dev);
spin_lock(&omap_domain->lock);
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
goto out;
}
iopgtable_clear_entry_all(oiommu);
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = NULL;
out:
spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain;
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain) {
pr_err("kzalloc failed\n");
goto out;
}
omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
if (!omap_domain->pgtable) {
pr_err("kzalloc failed\n");
goto fail_nomem;
}
/*
* should never fail, but please keep this around to ensure
* we keep the hardware happy
*/
BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock);
domain->priv = omap_domain;
return 0;
fail_nomem:
kfree(omap_domain);
out:
return -ENOMEM;
}
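The BUG_ON above encodes a hardware requirement: the IOMMU's first-level table must be aligned to its own size, and domain_init simply trusts kzalloc() to return such a pointer. A tiny user-space sketch of the same invariant, with IOPGD_TABLE_SIZE assumed here to be the ARM-style 16 KiB first-level table:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define IOPGD_TABLE_SIZE (16 * 1024)	/* assumed first-level table size */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	/* aligned_alloc makes explicit what the driver trusts kzalloc to do */
	void *pgtable = aligned_alloc(IOPGD_TABLE_SIZE, IOPGD_TABLE_SIZE);

	assert(pgtable);
	assert(IS_ALIGNED((uintptr_t)pgtable, IOPGD_TABLE_SIZE));
	free(pgtable);
	return 0;
}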
/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long da)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
u32 *pgd, *pte;
phys_addr_t ret = 0;
iopgtable_lookup_entry(oiommu, da, &pgd, &pte);
if (pte) {
if (iopte_is_small(*pte))
ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
else if (iopte_is_large(*pte))
ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
else
dev_err(dev, "bogus pte 0x%x", *pte);
} else {
if (iopgd_is_section(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
else if (iopgd_is_super(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
else
dev_err(dev, "bogus pgd 0x%x", *pgd);
}
return ret;
}
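omap_iommu_translate() itself is defined elsewhere (in the plat iopgtable headers), but from the masks selected above it plausibly just splices the page-frame bits of the table entry together with the low offset bits of da. A self-contained sketch under that assumption, with mask values taken from the usual OMAP page sizes:

#include <stdint.h>
#include <stdio.h>

#define IOPTE_MASK	0xfffff000u	/* assumed: 4 KiB small page  */
#define IOLARGE_MASK	0xffff0000u	/* assumed: 64 KiB large page */

/* page-frame bits from the pte/pgd entry, offset bits from da */
static uint32_t translate(uint32_t ent, uint32_t da, uint32_t mask)
{
	return (ent & mask) | (da & ~mask);
}

int main(void)
{
	/* a small-page pte of 0x81234000 plus offset 0xabc -> 0x81234abc */
	printf("small: 0x%08x\n", translate(0x81234000u, 0x40000abcu, IOPTE_MASK));
	/* a large-page entry keeps 16 offset bits from da */
	printf("large: 0x%08x\n", translate(0x81230000u, 0x4000fabcu, IOLARGE_MASK));
	return 0;
}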
static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
return 0;
}
static struct iommu_ops omap_iommu_ops = {
.domain_init = omap_iommu_domain_init,
.domain_destroy = omap_iommu_domain_destroy,
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.domain_has_cap = omap_iommu_domain_has_cap,
};
static int __init omap_iommu_init(void) static int __init omap_iommu_init(void)
{ {
struct kmem_cache *p; struct kmem_cache *p;
@@ -1084,6 +1225,8 @@ static int __init omap_iommu_init(void)
return -ENOMEM; return -ENOMEM;
iopte_cachep = p; iopte_cachep = p;
bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
return platform_driver_register(&omap_iommu_driver); return platform_driver_register(&omap_iommu_driver);
} }
module_init(omap_iommu_init); module_init(omap_iommu_init);
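The bus_set_iommu() call above is what makes the new bus_type parameter of iommu_domain_alloc() meaningful: the core no longer consults a single global ops table but the ops hung off the bus. A simplified sketch of the core-side allocation this enables (close in spirit to, but not verbatim from, drivers/iommu/iommu.c):

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;		/* no IOMMU driver on this bus */

	domain = kmalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;	/* e.g. &omap_iommu_ops above */

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);
	return NULL;
}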


@@ -15,6 +15,7 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/mach/map.h> #include <asm/mach/map.h>
@@ -22,44 +23,19 @@
#include <plat/iommu.h> #include <plat/iommu.h>
#include <plat/iovmm.h> #include <plat/iovmm.h>
#include "iopgtable.h" #include <plat/iopgtable.h>
/*
* A device driver needs to create address mappings between:
*
* - iommu/device address
* - physical address
* - mpu virtual address
*
* There are 4 possible patterns for them:
*
* |iova/ mapping iommu_ page
* | da pa va (d)-(p)-(v) function type
* ---------------------------------------------------------------------------
* 1 | c c c 1 - 1 - 1 _kmap() / _kunmap() s
* 2 | c c,a c 1 - 1 - 1 _kmalloc()/ _kfree() s
* 3 | c d c 1 - n - 1 _vmap() / _vunmap() s
* 4 | c d,a c 1 - n - 1 _vmalloc()/ _vfree() n*
*
*
* 'iova': device iommu virtual address
* 'da': alias of 'iova'
* 'pa': physical address
* 'va': mpu virtual address
*
* 'c': contiguous memory area
* 'd': discontiguous memory area
* 'a': anonymous memory allocation
* '()': optional feature
*
* 'n': a normal page(4KB) size is used.
* 's': multiple iommu superpage(16MB, 1MB, 64KB, 4KB) size is used.
*
* '*': not yet, but feasible.
*/
static struct kmem_cache *iovm_area_cachep; static struct kmem_cache *iovm_area_cachep;
/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
if (!sgt || !sgt->nents)
return 0;
return sgt->sgl->offset;
}
/* return total bytes of sg buffers */ /* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt) static size_t sgtable_len(const struct sg_table *sgt)
{ {
@@ -72,11 +48,17 @@ static size_t sgtable_len(const struct sg_table *sgt)
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
 
-		bytes = sg->length;
+		bytes = sg->length + sg->offset;
 
 		if (!iopgsz_ok(bytes)) {
-			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
-				__func__, i, bytes);
+			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
+				__func__, i, bytes, sg->offset);
+			return 0;
+		}
+
+		if (i && sg->offset) {
+			pr_err("%s: sg[%d] offset not allowed in internal "
+					"entries\n", __func__, i);
 			return 0;
 		}
@@ -197,8 +179,8 @@ static void *vmap_sg(const struct sg_table *sgt)
 		u32 pa;
 		int err;
 
-		pa = sg_phys(sg);
-		bytes = sg->length;
+		pa = sg_phys(sg) - sg->offset;
+		bytes = sg->length + sg->offset;
 
 		BUG_ON(bytes != PAGE_SIZE);
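A pattern repeats through these iovmm hunks: a buffer may start at a sub-page offset, so the code maps from the page-aligned address (sg_phys(sg) - sg->offset) over sg->length + sg->offset bytes, and omap_iommu_vmap() later returns da + sgtable_offset(sgt) so the caller sees the device address of the data rather than of the page. A worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t phys   = 0x80001234;		/* buffer starts mid-page   */
	uint32_t offset = phys & 0xfffu;	/* 0x234 into its 4K page   */
	uint32_t length = 0x0dcc;		/* hypothetical buffer size */

	uint32_t map_pa    = phys - offset;	/* page-aligned mapping base */
	uint32_t map_bytes = length + offset;	/* rounds out to the page    */
	uint32_t da_base   = 0x20000000;	/* device address of mapping */

	printf("map pa=0x%08x bytes=0x%x -> caller da=0x%08x\n",
	       map_pa, map_bytes, da_base + offset);
	return 0;
}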
@@ -224,7 +206,8 @@ static inline void vunmap_sg(const void *va)
vunmap(va); vunmap(va);
} }
static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da) static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
const u32 da)
{ {
struct iovm_struct *tmp; struct iovm_struct *tmp;
@@ -246,12 +229,12 @@ static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
} }
/** /**
* find_iovm_area - find iovma which includes @da * omap_find_iovm_area - find iovma which includes @da
* @da: iommu device virtual address * @da: iommu device virtual address
* *
* Find the existing iovma starting at @da * Find the existing iovma starting at @da
*/ */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da) struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{ {
struct iovm_struct *area; struct iovm_struct *area;
@@ -261,13 +244,13 @@ struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
return area; return area;
} }
EXPORT_SYMBOL_GPL(find_iovm_area); EXPORT_SYMBOL_GPL(omap_find_iovm_area);
/* /*
* This finds the hole(area) which fits the requested address and len * This finds the hole(area) which fits the requested address and len
* in iovmas mmap, and returns the new allocated iovma. * in iovmas mmap, and returns the new allocated iovma.
*/ */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da, static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
size_t bytes, u32 flags) size_t bytes, u32 flags)
{ {
struct iovm_struct *new, *tmp; struct iovm_struct *new, *tmp;
@@ -342,7 +325,7 @@ found:
return new; return new;
} }
static void free_iovm_area(struct iommu *obj, struct iovm_struct *area) static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{ {
size_t bytes; size_t bytes;
@@ -358,14 +341,14 @@ static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
} }
/** /**
* da_to_va - convert (d) to (v) * omap_da_to_va - convert (d) to (v)
* @obj: objective iommu * @obj: objective iommu
* @da: iommu device virtual address * @da: iommu device virtual address
* @va: mpu virtual address * @va: mpu virtual address
* *
* Returns mpu virtual addr which corresponds to a given device virtual addr * Returns mpu virtual addr which corresponds to a given device virtual addr
*/ */
void *da_to_va(struct iommu *obj, u32 da) void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{ {
void *va = NULL; void *va = NULL;
struct iovm_struct *area; struct iovm_struct *area;
@@ -383,7 +366,7 @@ out:
return va; return va;
} }
EXPORT_SYMBOL_GPL(da_to_va); EXPORT_SYMBOL_GPL(omap_da_to_va);
static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va) static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{ {
@@ -397,7 +380,7 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
const size_t bytes = PAGE_SIZE; const size_t bytes = PAGE_SIZE;
/* /*
* iommu 'superpage' isn't supported with 'iommu_vmalloc()' * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
*/ */
pg = vmalloc_to_page(va); pg = vmalloc_to_page(va);
BUG_ON(!pg); BUG_ON(!pg);
@@ -418,74 +401,39 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
BUG_ON(!sgt); BUG_ON(!sgt);
} }
static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
size_t len)
{
unsigned int i;
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
unsigned bytes;
bytes = max_alignment(da | pa);
bytes = min_t(unsigned, bytes, iopgsz_max(len));
BUG_ON(!iopgsz_ok(bytes));
sg_set_buf(sg, phys_to_virt(pa), bytes);
/*
* 'pa' is continuous (linear).
*/
pa += bytes;
da += bytes;
len -= bytes;
}
BUG_ON(len);
}
static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
/*
* Actually this is not necessary at all, just exists for
* consistency of the code readability
*/
BUG_ON(!sgt);
}
 /* create 'da' <-> 'pa' mapping from 'sgt' */
-static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
+static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
 			 const struct sg_table *sgt, u32 flags)
 {
 	int err;
 	unsigned int i, j;
 	struct scatterlist *sg;
 	u32 da = new->da_start;
+	int order;
 
-	if (!obj || !sgt)
+	if (!domain || !sgt)
 		return -EINVAL;
 
 	BUG_ON(!sgtable_ok(sgt));
 
 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		u32 pa;
-		int pgsz;
 		size_t bytes;
-		struct iotlb_entry e;
 
-		pa = sg_phys(sg);
-		bytes = sg->length;
+		pa = sg_phys(sg) - sg->offset;
+		bytes = sg->length + sg->offset;
 
 		flags &= ~IOVMF_PGSZ_MASK;
-		pgsz = bytes_to_iopgsz(bytes);
-		if (pgsz < 0)
+
+		if (bytes_to_iopgsz(bytes) < 0)
 			goto err_out;
-		flags |= pgsz;
+
+		order = get_order(bytes);
 
 		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
 			 i, da, pa, bytes);
 
-		iotlb_init_entry(&e, da, pa, flags);
-		err = iopgtable_store_entry(obj, &e);
+		err = iommu_map(domain, da, pa, order, flags);
 		if (err)
 			goto err_out;

@@ -499,9 +447,11 @@ err_out:
 	for_each_sg(sgt->sgl, sg, i, j) {
 		size_t bytes;
 
-		bytes = iopgtable_clear_entry(obj, da);
+		bytes = sg->length + sg->offset;
+		order = get_order(bytes);
 
-		BUG_ON(!iopgsz_ok(bytes));
+		/* ignore failures.. we're already handling one */
+		iommu_unmap(domain, da, order);
 
 		da += bytes;
 	}
@@ -509,22 +459,31 @@
} }
 /* release 'da' <-> 'pa' mapping */
-static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
+static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
+			struct iovm_struct *area)
 {
 	u32 start;
 	size_t total = area->da_end - area->da_start;
+	const struct sg_table *sgt = area->sgt;
+	struct scatterlist *sg;
+	int i, err;
 
+	BUG_ON(!sgtable_ok(sgt));
 	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
 
 	start = area->da_start;
-	while (total > 0) {
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		size_t bytes;
+		int order;
 
-		bytes = iopgtable_clear_entry(obj, start);
-		if (bytes == 0)
-			bytes = PAGE_SIZE;
-		else
-			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
-				__func__, start, bytes, area->flags);
+		bytes = sg->length + sg->offset;
+		order = get_order(bytes);
+
+		err = iommu_unmap(domain, start, order);
+		if (err < 0)
+			break;
+
+		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
+				__func__, start, bytes, area->flags);
 
 		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
@@ -536,7 +495,8 @@ static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
} }
/* template function for all unmapping */ /* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da, static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
struct omap_iommu *obj, const u32 da,
void (*fn)(const void *), u32 flags) void (*fn)(const void *), u32 flags)
{ {
struct sg_table *sgt = NULL; struct sg_table *sgt = NULL;
@@ -562,7 +522,7 @@ static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
} }
sgt = (struct sg_table *)area->sgt; sgt = (struct sg_table *)area->sgt;
unmap_iovm_area(obj, area); unmap_iovm_area(domain, obj, area);
fn(area->va); fn(area->va);
@@ -577,8 +537,9 @@ out:
return sgt; return sgt;
} }
static u32 map_iommu_region(struct iommu *obj, u32 da, static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
const struct sg_table *sgt, void *va, size_t bytes, u32 flags) u32 da, const struct sg_table *sgt, void *va,
size_t bytes, u32 flags)
{ {
int err = -ENOMEM; int err = -ENOMEM;
struct iovm_struct *new; struct iovm_struct *new;
@@ -593,7 +554,7 @@ static u32 map_iommu_region(struct iommu *obj, u32 da,
new->va = va; new->va = va;
new->sgt = sgt; new->sgt = sgt;
if (map_iovm_area(obj, new, sgt, new->flags)) if (map_iovm_area(domain, new, sgt, new->flags))
goto err_map; goto err_map;
mutex_unlock(&obj->mmap_lock); mutex_unlock(&obj->mmap_lock);
@@ -610,14 +571,16 @@ err_alloc_iovma:
return err; return err;
} }
static inline u32 __iommu_vmap(struct iommu *obj, u32 da, static inline u32
const struct sg_table *sgt, void *va, size_t bytes, u32 flags) __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
u32 da, const struct sg_table *sgt,
void *va, size_t bytes, u32 flags)
{ {
return map_iommu_region(obj, da, sgt, va, bytes, flags); return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
} }
/** /**
* iommu_vmap - (d)-(p)-(v) address mapper * omap_iommu_vmap - (d)-(p)-(v) address mapper
* @obj: objective iommu * @obj: objective iommu
* @sgt: address of scatter gather table * @sgt: address of scatter gather table
* @flags: iovma and page property * @flags: iovma and page property
@@ -625,8 +588,8 @@ static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
* Creates 1-n-1 mapping with given @sgt and returns @da. * Creates 1-n-1 mapping with given @sgt and returns @da.
* All @sgt element must be io page size aligned. * All @sgt element must be io page size aligned.
*/ */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
u32 flags) const struct sg_table *sgt, u32 flags)
{ {
size_t bytes; size_t bytes;
void *va = NULL; void *va = NULL;
@@ -648,38 +611,41 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
flags |= IOVMF_DISCONT; flags |= IOVMF_DISCONT;
flags |= IOVMF_MMIO; flags |= IOVMF_MMIO;
da = __iommu_vmap(obj, da, sgt, va, bytes, flags); da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
if (IS_ERR_VALUE(da)) if (IS_ERR_VALUE(da))
vunmap_sg(va); vunmap_sg(va);
return da; return da + sgtable_offset(sgt);
} }
EXPORT_SYMBOL_GPL(iommu_vmap); EXPORT_SYMBOL_GPL(omap_iommu_vmap);
/** /**
* iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()' * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
* @obj: objective iommu * @obj: objective iommu
* @da: iommu device virtual address * @da: iommu device virtual address
* *
* Free the iommu virtually contiguous memory area starting at * Free the iommu virtually contiguous memory area starting at
* @da, which was returned by 'iommu_vmap()'. * @da, which was returned by 'omap_iommu_vmap()'.
*/ */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da) struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{ {
struct sg_table *sgt; struct sg_table *sgt;
/* /*
* 'sgt' is allocated before 'iommu_vmalloc()' is called. * 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
* Just returns 'sgt' to the caller to free * Just returns 'sgt' to the caller to free
*/ */
sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO); da &= PAGE_MASK;
sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
IOVMF_DISCONT | IOVMF_MMIO);
if (!sgt) if (!sgt)
dev_dbg(obj->dev, "%s: No sgt\n", __func__); dev_dbg(obj->dev, "%s: No sgt\n", __func__);
return sgt; return sgt;
} }
EXPORT_SYMBOL_GPL(iommu_vunmap); EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/** /**
* iommu_vmalloc - (d)-(p)-(v) address allocator and mapper * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
* @obj: objective iommu * @obj: objective iommu
* @da: contiguous iommu virtual memory * @da: contiguous iommu virtual memory
* @bytes: allocation size * @bytes: allocation size
@@ -688,7 +654,9 @@ EXPORT_SYMBOL_GPL(iommu_vunmap);
* Allocate @bytes linearly and creates 1-n-1 mapping and returns * Allocate @bytes linearly and creates 1-n-1 mapping and returns
* @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set. * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
*/ */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
size_t bytes, u32 flags)
{ {
void *va; void *va;
struct sg_table *sgt; struct sg_table *sgt;
@@ -712,7 +680,7 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
} }
sgtable_fill_vmalloc(sgt, va); sgtable_fill_vmalloc(sgt, va);
da = __iommu_vmap(obj, da, sgt, va, bytes, flags); da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
if (IS_ERR_VALUE(da)) if (IS_ERR_VALUE(da))
goto err_iommu_vmap; goto err_iommu_vmap;
@@ -725,158 +693,28 @@ err_sgt_alloc:
vfree(va); vfree(va);
return da; return da;
} }
EXPORT_SYMBOL_GPL(iommu_vmalloc); EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
/** /**
* iommu_vfree - release memory allocated by 'iommu_vmalloc()' * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
* @obj: objective iommu * @obj: objective iommu
* @da: iommu device virtual address * @da: iommu device virtual address
* *
* Frees the iommu virtually continuous memory area starting at * Frees the iommu virtually continuous memory area starting at
* @da, as obtained from 'iommu_vmalloc()'. * @da, as obtained from 'omap_iommu_vmalloc()'.
*/ */
void iommu_vfree(struct iommu *obj, const u32 da) void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
const u32 da)
{ {
struct sg_table *sgt; struct sg_table *sgt;
sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC); sgt = unmap_vm_area(domain, obj, da, vfree,
IOVMF_DISCONT | IOVMF_ALLOC);
if (!sgt) if (!sgt)
dev_dbg(obj->dev, "%s: No sgt\n", __func__); dev_dbg(obj->dev, "%s: No sgt\n", __func__);
sgtable_free(sgt); sgtable_free(sgt);
} }
EXPORT_SYMBOL_GPL(iommu_vfree); EXPORT_SYMBOL_GPL(omap_iommu_vfree);
static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
size_t bytes, u32 flags)
{
struct sg_table *sgt;
sgt = sgtable_alloc(bytes, flags, da, pa);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
sgtable_fill_kmalloc(sgt, pa, da, bytes);
da = map_iommu_region(obj, da, sgt, va, bytes, flags);
if (IS_ERR_VALUE(da)) {
sgtable_drain_kmalloc(sgt);
sgtable_free(sgt);
}
return da;
}
/**
* iommu_kmap - (d)-(p)-(v) address mapper
* @obj: objective iommu
* @da: contiguous iommu virtual memory
* @pa: contiguous physical memory
* @flags: iovma and page property
*
* Creates 1-1-1 mapping and returns @da again, which can be
* adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
u32 flags)
{
void *va;
if (!obj || !obj->dev || !bytes)
return -EINVAL;
bytes = PAGE_ALIGN(bytes);
va = ioremap(pa, bytes);
if (!va)
return -ENOMEM;
flags |= IOVMF_LINEAR;
flags |= IOVMF_MMIO;
da = __iommu_kmap(obj, da, pa, va, bytes, flags);
if (IS_ERR_VALUE(da))
iounmap(va);
return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
/**
* iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
* @obj: objective iommu
* @da: iommu device virtual address
*
* Frees the iommu virtually contiguous memory area starting at
* @da, which was passed to and was returned by 'iommu_kmap()'.
*/
void iommu_kunmap(struct iommu *obj, u32 da)
{
struct sg_table *sgt;
typedef void (*func_t)(const void *);
sgt = unmap_vm_area(obj, da, (func_t)iounmap,
IOVMF_LINEAR | IOVMF_MMIO);
if (!sgt)
dev_dbg(obj->dev, "%s: No sgt\n", __func__);
sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);
/**
* iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
* @obj: objective iommu
* @da: contiguous iommu virtual memory
* @bytes: bytes for allocation
* @flags: iovma and page property
*
* Allocate @bytes linearly and creates 1-1-1 mapping and returns
* @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
void *va;
u32 pa;
if (!obj || !obj->dev || !bytes)
return -EINVAL;
bytes = PAGE_ALIGN(bytes);
va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
if (!va)
return -ENOMEM;
pa = virt_to_phys(va);
flags |= IOVMF_LINEAR;
flags |= IOVMF_ALLOC;
da = __iommu_kmap(obj, da, pa, va, bytes, flags);
if (IS_ERR_VALUE(da))
kfree(va);
return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
/**
* iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
* @obj: objective iommu
* @da: iommu device virtual address
*
* Frees the iommu virtually contiguous memory area starting at
* @da, which was passed to and was returned by 'iommu_kmalloc()'.
*/
void iommu_kfree(struct iommu *obj, u32 da)
{
struct sg_table *sgt;
sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
if (!sgt)
dev_dbg(obj->dev, "%s: No sgt\n", __func__);
sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);
static int __init iovmm_init(void) static int __init iovmm_init(void)
{ {


@@ -763,8 +763,7 @@ source "drivers/media/video/m5mols/Kconfig"
 config VIDEO_OMAP3
 	tristate "OMAP 3 Camera support (EXPERIMENTAL)"
-	select OMAP_IOMMU
-	depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
+	depends on OMAP_IOVMM && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3 && EXPERIMENTAL
 	---help---
 	  Driver for an OMAP 3 camera controller.


@@ -80,6 +80,13 @@
#include "isph3a.h" #include "isph3a.h"
#include "isphist.h" #include "isphist.h"
/*
* this is provided as an interim solution until omap3isp no longer needs
* any omap-specific iommu API
*/
#define to_iommu(dev) \
(struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
static unsigned int autoidle; static unsigned int autoidle;
module_param(autoidle, int, 0444); module_param(autoidle, int, 0444);
MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support"); MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -1108,7 +1115,7 @@ static void isp_save_ctx(struct isp_device *isp)
{ {
isp_save_context(isp, isp_reg_list); isp_save_context(isp, isp_reg_list);
if (isp->iommu) if (isp->iommu)
iommu_save_ctx(isp->iommu); omap_iommu_save_ctx(isp->iommu);
} }
/* /*
@@ -1122,7 +1129,7 @@ static void isp_restore_ctx(struct isp_device *isp)
{ {
isp_restore_context(isp, isp_reg_list); isp_restore_context(isp, isp_reg_list);
if (isp->iommu) if (isp->iommu)
iommu_restore_ctx(isp->iommu); omap_iommu_restore_ctx(isp->iommu);
omap3isp_ccdc_restore_context(isp); omap3isp_ccdc_restore_context(isp);
omap3isp_preview_restore_context(isp); omap3isp_preview_restore_context(isp);
} }
@@ -1975,7 +1982,8 @@ static int isp_remove(struct platform_device *pdev)
isp_cleanup_modules(isp); isp_cleanup_modules(isp);
omap3isp_get(isp); omap3isp_get(isp);
iommu_put(isp->iommu); iommu_detach_device(isp->domain, isp->iommu_dev);
iommu_domain_free(isp->domain);
omap3isp_put(isp); omap3isp_put(isp);
free_irq(isp->irq_num, isp); free_irq(isp->irq_num, isp);
@@ -2123,25 +2131,41 @@ static int isp_probe(struct platform_device *pdev)
 	}
 
 	/* IOMMU */
-	isp->iommu = iommu_get("isp");
-	if (IS_ERR_OR_NULL(isp->iommu)) {
-		isp->iommu = NULL;
+	isp->iommu_dev = omap_find_iommu_device("isp");
+	if (!isp->iommu_dev) {
+		dev_err(isp->dev, "omap_find_iommu_device failed\n");
 		ret = -ENODEV;
 		goto error_isp;
 	}
 
+	/* to be removed once iommu migration is complete */
+	isp->iommu = to_iommu(isp->iommu_dev);
+
+	isp->domain = iommu_domain_alloc(pdev->dev.bus);
+	if (!isp->domain) {
+		dev_err(isp->dev, "can't alloc iommu domain\n");
+		ret = -ENOMEM;
+		goto error_isp;
+	}
+
+	ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
+		goto free_domain;
+	}
+
 	/* Interrupt */
 	isp->irq_num = platform_get_irq(pdev, 0);
 	if (isp->irq_num <= 0) {
 		dev_err(isp->dev, "No IRQ resource\n");
 		ret = -ENODEV;
-		goto error_isp;
+		goto detach_dev;
 	}
 
 	if (request_irq(isp->irq_num, isp_isr, IRQF_SHARED, "OMAP3 ISP", isp)) {
 		dev_err(isp->dev, "Unable to request IRQ\n");
 		ret = -EINVAL;
-		goto error_isp;
+		goto detach_dev;
 	}
/* Entities */ /* Entities */
@@ -2162,8 +2186,11 @@ error_modules:
isp_cleanup_modules(isp); isp_cleanup_modules(isp);
error_irq: error_irq:
free_irq(isp->irq_num, isp); free_irq(isp->irq_num, isp);
detach_dev:
iommu_detach_device(isp->domain, isp->iommu_dev);
free_domain:
iommu_domain_free(isp->domain);
error_isp: error_isp:
iommu_put(isp->iommu);
omap3isp_put(isp); omap3isp_put(isp);
error: error:
isp_put_clocks(isp); isp_put_clocks(isp);

View File

@@ -32,6 +32,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/iommu.h>
#include <plat/iommu.h> #include <plat/iommu.h>
#include <plat/iovmm.h> #include <plat/iovmm.h>
@@ -294,7 +295,9 @@ struct isp_device {
unsigned int sbl_resources; unsigned int sbl_resources;
unsigned int subclk_resources; unsigned int subclk_resources;
struct iommu *iommu; struct omap_iommu *iommu;
struct iommu_domain *domain;
struct device *iommu_dev;
struct isp_platform_callback platform_cb; struct isp_platform_callback platform_cb;
}; };


@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
dma_unmap_sg(isp->dev, req->iovm->sgt->sgl, dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE); req->iovm->sgt->nents, DMA_TO_DEVICE);
if (req->table) if (req->table)
iommu_vfree(isp->iommu, req->table); omap_iommu_vfree(isp->domain, isp->iommu, req->table);
kfree(req); kfree(req);
} }
@@ -438,15 +438,15 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1; req->enable = 1;
req->table = iommu_vmalloc(isp->iommu, 0, req->config.size, req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
IOMMU_FLAG); req->config.size, IOMMU_FLAG);
if (IS_ERR_VALUE(req->table)) { if (IS_ERR_VALUE(req->table)) {
req->table = 0; req->table = 0;
ret = -ENOMEM; ret = -ENOMEM;
goto done; goto done;
} }
req->iovm = find_iovm_area(isp->iommu, req->table); req->iovm = omap_find_iovm_area(isp->iommu, req->table);
if (req->iovm == NULL) { if (req->iovm == NULL) {
ret = -ENOMEM; ret = -ENOMEM;
goto done; goto done;
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl, dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE); req->iovm->sgt->nents, DMA_TO_DEVICE);
table = da_to_va(isp->iommu, req->table); table = omap_da_to_va(isp->iommu, req->table);
if (copy_from_user(table, config->lsc, req->config.size)) { if (copy_from_user(table, config->lsc, req->config.size)) {
ret = -EFAULT; ret = -EFAULT;
goto done; goto done;
@@ -731,18 +731,19 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
/* /*
* table_new must be 64-bytes aligned, but it's * table_new must be 64-bytes aligned, but it's
* already done by iommu_vmalloc(). * already done by omap_iommu_vmalloc().
*/ */
size = ccdc->fpc.fpnum * 4; size = ccdc->fpc.fpnum * 4;
table_new = iommu_vmalloc(isp->iommu, 0, size, table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
IOMMU_FLAG); 0, size, IOMMU_FLAG);
if (IS_ERR_VALUE(table_new)) if (IS_ERR_VALUE(table_new))
return -ENOMEM; return -ENOMEM;
if (copy_from_user(da_to_va(isp->iommu, table_new), if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
(__force void __user *) (__force void __user *)
ccdc->fpc.fpcaddr, size)) { ccdc->fpc.fpcaddr, size)) {
iommu_vfree(isp->iommu, table_new); omap_iommu_vfree(isp->domain, isp->iommu,
table_new);
return -EFAULT; return -EFAULT;
} }
@@ -752,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc_configure_fpc(ccdc); ccdc_configure_fpc(ccdc);
if (table_old != 0) if (table_old != 0)
iommu_vfree(isp->iommu, table_old); omap_iommu_vfree(isp->domain, isp->iommu, table_old);
} }
return ccdc_lsc_config(ccdc, ccdc_struct); return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -2287,5 +2288,5 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue); ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
if (ccdc->fpc.fpcaddr != 0) if (ccdc->fpc.fpcaddr != 0)
iommu_vfree(isp->iommu, ccdc->fpc.fpcaddr); omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
} }


@@ -366,7 +366,8 @@ static void isp_stat_bufs_free(struct ispstat *stat)
dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl, dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
buf->iovm->sgt->nents, buf->iovm->sgt->nents,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
iommu_vfree(isp->iommu, buf->iommu_addr); omap_iommu_vfree(isp->domain, isp->iommu,
buf->iommu_addr);
} else { } else {
if (!buf->virt_addr) if (!buf->virt_addr)
continue; continue;
@@ -399,8 +400,8 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
struct iovm_struct *iovm; struct iovm_struct *iovm;
WARN_ON(buf->dma_addr); WARN_ON(buf->dma_addr);
buf->iommu_addr = iommu_vmalloc(isp->iommu, 0, size, buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
IOMMU_FLAG); size, IOMMU_FLAG);
if (IS_ERR((void *)buf->iommu_addr)) { if (IS_ERR((void *)buf->iommu_addr)) {
dev_err(stat->isp->dev, dev_err(stat->isp->dev,
"%s: Can't acquire memory for " "%s: Can't acquire memory for "
@@ -409,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
return -ENOMEM; return -ENOMEM;
} }
iovm = find_iovm_area(isp->iommu, buf->iommu_addr); iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
if (!iovm || if (!iovm ||
!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
DMA_FROM_DEVICE)) { DMA_FROM_DEVICE)) {
@@ -418,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
} }
buf->iovm = iovm; buf->iovm = iovm;
buf->virt_addr = da_to_va(stat->isp->iommu, buf->virt_addr = omap_da_to_va(stat->isp->iommu,
(u32)buf->iommu_addr); (u32)buf->iommu_addr);
buf->empty = 1; buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."


@@ -446,7 +446,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
sgt->nents = sglen; sgt->nents = sglen;
sgt->orig_nents = sglen; sgt->orig_nents = sglen;
da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG); da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
if (IS_ERR_VALUE(da)) if (IS_ERR_VALUE(da))
kfree(sgt); kfree(sgt);
@@ -462,7 +462,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{ {
struct sg_table *sgt; struct sg_table *sgt;
sgt = iommu_vunmap(isp->iommu, (u32)da); sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
kfree(sgt); kfree(sgt);
} }


@@ -33,6 +33,7 @@ struct class;
struct subsys_private; struct subsys_private;
struct bus_type; struct bus_type;
struct device_node; struct device_node;
struct iommu_ops;
struct bus_attribute { struct bus_attribute {
struct attribute attr; struct attribute attr;
@@ -67,6 +68,9 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @resume: Called to bring a device on this bus out of sleep mode. * @resume: Called to bring a device on this bus out of sleep mode.
* @pm: Power management operations of this bus, callback the specific * @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops. * device driver's pm-ops.
* @iommu_ops:	IOMMU specific operations for this bus, used to attach IOMMU
* driver implementations to a bus and allow the driver to do
* bus-specific setup
* @p: The private data of the driver core, only the driver core can * @p: The private data of the driver core, only the driver core can
* touch this. * touch this.
* *
@@ -96,6 +100,8 @@ struct bus_type {
const struct dev_pm_ops *pm; const struct dev_pm_ops *pm;
struct iommu_ops *iommu_ops;
struct subsys_private *p; struct subsys_private *p;
}; };
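The new field is written once per bus by bus_set_iommu(); a simplified sketch of what that registration amounts to (the real helper lives in drivers/iommu/iommu.c and may also perform bus-specific setup):

int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	/* only one IOMMU driver may claim a bus type */
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;
	return 0;
}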


@@ -25,15 +25,29 @@
#define IOMMU_WRITE (2) #define IOMMU_WRITE (2)
#define IOMMU_CACHE (4) /* DMA cache coherency */ #define IOMMU_CACHE (4) /* DMA cache coherency */
struct iommu_ops;
struct bus_type;
struct device; struct device;
struct iommu_domain;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
#define IOMMU_FAULT_WRITE 0x1
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int);
 struct iommu_domain {
+	struct iommu_ops *ops;
 	void *priv;
+	iommu_fault_handler_t handler;
 };
#define IOMMU_CAP_CACHE_COHERENCY 0x1 #define IOMMU_CAP_CACHE_COHERENCY 0x1
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
#ifdef CONFIG_IOMMU_API
struct iommu_ops { struct iommu_ops {
int (*domain_init)(struct iommu_domain *domain); int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain); void (*domain_destroy)(struct iommu_domain *domain);
@@ -49,11 +63,9 @@ struct iommu_ops {
unsigned long cap); unsigned long cap);
}; };
-#ifdef CONFIG_IOMMU_API
-
-extern void register_iommu(struct iommu_ops *ops);
-extern bool iommu_found(void);
-extern struct iommu_domain *iommu_domain_alloc(void);
+extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
+extern bool iommu_present(struct bus_type *bus);
+extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain); extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain, extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev); struct device *dev);
@@ -67,19 +79,58 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova); unsigned long iova);
extern int iommu_domain_has_cap(struct iommu_domain *domain, extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap); unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
* @dev: the device where the fault has happened
* @iova: the faulting address
* @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
*
* This function should be called by the low-level IOMMU implementations
* whenever IOMMU faults happen, to allow high-level users, that are
* interested in such events, to know about them.
*
* This event may be useful for several possible use cases:
* - mere logging of the event
* - dynamic TLB/PTE loading
* - if restarting of the faulting device is required
*
* Returns 0 on success and an appropriate error code otherwise (if dynamic
* PTE/TLB loading will one day be supported, implementations will be able
* to tell whether it succeeded or not according to this return value).
*
* Specifically, -ENOSYS is returned if a fault handler isn't installed
* (though fault handlers can also return -ENOSYS, in case they want to
* elicit the default behavior of the IOMMU drivers).
*/
static inline int report_iommu_fault(struct iommu_domain *domain,
struct device *dev, unsigned long iova, int flags)
{
int ret = -ENOSYS;
/*
* if upper layers showed interest and installed a fault handler,
* invoke it.
*/
if (domain->handler)
ret = domain->handler(domain, dev, iova, flags);
return ret;
}
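A minimal sketch of a consumer of this fault-report mechanism: install a handler after attaching, log the fault, and decide whether to override the driver's default handling. my_fault_handler is hypothetical; only its signature (iommu_fault_handler_t above) is fixed by the API:

static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
			    unsigned long iova, int flags)
{
	dev_err(dev, "iommu fault at 0x%lx (%s)\n", iova,
		(flags & IOMMU_FAULT_WRITE) ? "write" : "read");

	/* returning -ENOSYS keeps the IOMMU driver's default behavior */
	return -ENOSYS;
}

/* after iommu_attach_device(domain, dev):		*/
/*	iommu_set_fault_handler(domain, my_fault_handler);	*/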
 #else /* CONFIG_IOMMU_API */
 
-static inline void register_iommu(struct iommu_ops *ops)
-{
-}
+struct iommu_ops {};
 
-static inline bool iommu_found(void)
+static inline bool iommu_present(struct bus_type *bus)
 {
 	return false;
 }
 
-static inline struct iommu_domain *iommu_domain_alloc(void)
+static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
 	return NULL;
 }
@@ -123,6 +174,11 @@ static inline int domain_has_cap(struct iommu_domain *domain,
return 0; return 0;
} }
static inline void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler)
{
}
#endif /* CONFIG_IOMMU_API */ #endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */ #endif /* __LINUX_IOMMU_H */


@@ -62,6 +62,8 @@ struct dma_debug_entry {
#endif #endif
}; };
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
struct hash_bucket { struct hash_bucket {
struct list_head list; struct list_head list;
spinlock_t lock; spinlock_t lock;
@@ -240,18 +242,37 @@ static void put_hash_bucket(struct hash_bucket *bucket,
spin_unlock_irqrestore(&bucket->lock, __flags); spin_unlock_irqrestore(&bucket->lock, __flags);
} }
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
return ((a->dev_addr == b->dev_addr) &&
(a->dev == b->dev)) ? true : false;
}
static bool containing_match(struct dma_debug_entry *a,
struct dma_debug_entry *b)
{
if (a->dev != b->dev)
return false;
if ((b->dev_addr <= a->dev_addr) &&
((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
return true;
return false;
}
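The two predicates differ in what they accept: exact_match wants the very same dev_addr, while containing_match accepts any reference range [r, r+size) that lies inside a stored mapping [e, e+size), which is what a partial dma_sync on a larger mapping looks like. A tiny standalone check with made-up addresses:

#include <stdbool.h>
#include <stdio.h>

struct entry { unsigned long dev_addr, size; };

/* same containment test as containing_match, minus the dev comparison */
static bool contains(const struct entry *ref, const struct entry *ent)
{
	return ent->dev_addr <= ref->dev_addr &&
	       ent->dev_addr + ent->size >= ref->dev_addr + ref->size;
}

int main(void)
{
	struct entry mapping = { 0x1000, 0x4000 };	/* mapped region   */
	struct entry sync    = { 0x2000, 0x0800 };	/* partial sync op */

	printf("partial sync inside mapping: %s\n",
	       contains(&sync, &mapping) ? "yes" : "no");
	return 0;
}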
 /*
  * Search a given entry in the hash bucket list
  */
-static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
-						struct dma_debug_entry *ref)
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+						  struct dma_debug_entry *ref,
+						  match_fn match)
 {
 	struct dma_debug_entry *entry, *ret = NULL;
 	int matches = 0, match_lvl, last_lvl = 0;
 
 	list_for_each_entry(entry, &bucket->list, list) {
-		if ((entry->dev_addr != ref->dev_addr) ||
-		    (entry->dev != ref->dev))
+		if (!match(ref, entry))
 			continue;
/* /*
@@ -293,6 +314,39 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
return ret; return ret;
} }
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
struct dma_debug_entry *ref)
{
return __hash_bucket_find(bucket, ref, exact_match);
}
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
struct dma_debug_entry *ref,
unsigned long *flags)
{
unsigned int max_range = dma_get_max_seg_size(ref->dev);
struct dma_debug_entry *entry, index = *ref;
unsigned int range = 0;
while (range <= max_range) {
entry = __hash_bucket_find(*bucket, &index, containing_match);
if (entry)
return entry;
/*
* Nothing found, go back a hash bucket
*/
put_hash_bucket(*bucket, flags);
range += (1 << HASH_FN_SHIFT);
index.dev_addr -= (1 << HASH_FN_SHIFT);
*bucket = get_hash_bucket(&index, flags);
}
return NULL;
}
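bucket_find_contain() has to walk backwards because entries are hashed by dev_addr: a mapping that merely contains the sync address may start in an earlier bucket, so each retry rewinds dev_addr by one bucket's worth of address space and widens the covered range until the device's maximum segment size is exceeded. A sketch of that probe order, with HASH_FN_SHIFT assumed to be 13 as in lib/dma-debug.c:

#include <stdio.h>

#define HASH_FN_SHIFT 13	/* assumed bucket granularity: 8 KiB */

int main(void)
{
	unsigned long dev_addr  = 0x10000;	/* hypothetical sync address */
	unsigned long max_range = 0x8000;	/* hypothetical max segment  */
	unsigned long range     = 0;

	/* same loop shape as bucket_find_contain() */
	while (range <= max_range) {
		printf("probe bucket holding dev_addr 0x%05lx\n", dev_addr);
		range    += 1UL << HASH_FN_SHIFT;
		dev_addr -= 1UL << HASH_FN_SHIFT;
	}
	return 0;
}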
/* /*
* Add an entry to a hash bucket * Add an entry to a hash bucket
*/ */
@@ -802,7 +856,7 @@ static void check_unmap(struct dma_debug_entry *ref)
} }
bucket = get_hash_bucket(ref, &flags); bucket = get_hash_bucket(ref, &flags);
entry = hash_bucket_find(bucket, ref); entry = bucket_find_exact(bucket, ref);
if (!entry) { if (!entry) {
err_printk(ref->dev, NULL, "DMA-API: device driver tries " err_printk(ref->dev, NULL, "DMA-API: device driver tries "
@@ -902,7 +956,7 @@ static void check_sync(struct device *dev,
bucket = get_hash_bucket(ref, &flags); bucket = get_hash_bucket(ref, &flags);
entry = hash_bucket_find(bucket, ref); entry = bucket_find_contain(&bucket, ref, &flags);
if (!entry) { if (!entry) {
err_printk(dev, NULL, "DMA-API: device driver tries " err_printk(dev, NULL, "DMA-API: device driver tries "
@@ -1060,7 +1114,7 @@ static int get_nr_mapped_entries(struct device *dev,
int mapped_ents; int mapped_ents;
bucket = get_hash_bucket(ref, &flags); bucket = get_hash_bucket(ref, &flags);
entry = hash_bucket_find(bucket, ref); entry = bucket_find_exact(bucket, ref);
mapped_ents = 0; mapped_ents = 0;
if (entry) if (entry)


@@ -232,12 +232,12 @@ int kvm_iommu_map_guest(struct kvm *kvm)
{ {
int r; int r;
if (!iommu_found()) { if (!iommu_present(&pci_bus_type)) {
printk(KERN_ERR "%s: iommu not found\n", __func__); printk(KERN_ERR "%s: iommu not found\n", __func__);
return -ENODEV; return -ENODEV;
} }
kvm->arch.iommu_domain = iommu_domain_alloc(); kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
if (!kvm->arch.iommu_domain) if (!kvm->arch.iommu_domain)
return -ENOMEM; return -ENOMEM;