
Merge branch 'arm/renesas' into arm/smmu

Joerg Roedel 2019-07-01 14:41:24 +02:00
commit 3430abd6f4
1 changed file with 125 additions and 62 deletions

drivers/iommu/ipmmu-vmsa.c

@@ -36,12 +36,16 @@
 #define arm_iommu_detach_device(...)	do {} while (0)
 #endif
 
-#define IPMMU_CTX_MAX 8
+#define IPMMU_CTX_MAX		8U
+#define IPMMU_CTX_INVALID	-1
+
+#define IPMMU_UTLB_MAX		48U
 
 struct ipmmu_features {
 	bool use_ns_alias_offset;
 	bool has_cache_leaf_nodes;
 	unsigned int number_of_contexts;
+	unsigned int num_utlbs;
 	bool setup_imbuscr;
 	bool twobit_imttbcr_sl0;
 	bool reserved_context;
@@ -53,11 +57,11 @@ struct ipmmu_vmsa_device {
 	struct iommu_device iommu;
 	struct ipmmu_vmsa_device *root;
 	const struct ipmmu_features *features;
-	unsigned int num_utlbs;
 	unsigned int num_ctx;
 	spinlock_t lock;			/* Protects ctx and domains[] */
 	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
 	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
+	s8 utlb_ctx[IPMMU_UTLB_MAX];
 
 	struct iommu_group *group;
 	struct dma_iommu_mapping *mapping;
@@ -186,7 +190,8 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 #define IMMAIR_ATTR_IDX_WBRWA		1
 #define IMMAIR_ATTR_IDX_DEV		2
 
-#define IMEAR				0x0030
+#define IMELAR				0x0030	/* IMEAR on R-Car Gen2 */
+#define IMEUAR				0x0034	/* R-Car Gen3 only */
 
 #define IMPCTR				0x0200
 #define IMPSTR				0x0208
@@ -334,6 +339,7 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
 	ipmmu_write(mmu, IMUCTR(utlb),
 		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
 		    IMUCTR_MMUEN);
+	mmu->utlb_ctx[utlb] = domain->context_id;
 }
 
 /*
@@ -345,6 +351,7 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
 	struct ipmmu_vmsa_device *mmu = domain->mmu;
 
 	ipmmu_write(mmu, IMUCTR(utlb), 0);
+	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
 }
 
 static void ipmmu_tlb_flush_all(void *cookie)
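These two hunks give the driver a software mirror of the micro-TLB assignments. As a hedged illustration of the invariant they establish (the helper below is hypothetical, not part of the patch), each utlb_ctx[] slot holds either IPMMU_CTX_INVALID or the context the micro-TLB is currently programmed for:

	/* Hypothetical helper, for illustration only -- not in the driver. */
	static bool ipmmu_utlb_active(struct ipmmu_vmsa_device *mmu,
				      unsigned int utlb)
	{
		/* utlb_ctx[] mirrors IMUCTR: a context index, or -1. */
		return mmu->utlb_ctx[utlb] != IPMMU_CTX_INVALID;
	}

The suspend/resume hunk at the end of this diff walks exactly this state to decide which micro-TLBs to re-enable.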
@@ -403,53 +410,10 @@ static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
 	spin_unlock_irqrestore(&mmu->lock, flags);
 }
 
-static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
 {
 	u64 ttbr;
 	u32 tmp;
-	int ret;
 
-	/*
-	 * Allocate the page table operations.
-	 *
-	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
-	 * access, Long-descriptor format" that the NStable bit being set in a
-	 * table descriptor will result in the NStable and NS bits of all child
-	 * entries being ignored and considered as being set. The IPMMU seems
-	 * not to comply with this, as it generates a secure access page fault
-	 * if any of the NStable and NS bits isn't set when running in
-	 * non-secure mode.
-	 */
-	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
-	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
-	domain->cfg.ias = 32;
-	domain->cfg.oas = 40;
-	domain->cfg.tlb = &ipmmu_gather_ops;
-	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
-	domain->io_domain.geometry.force_aperture = true;
-
-	/*
-	 * TODO: Add support for coherent walk through CCI with DVM and remove
-	 * cache handling. For now, delegate it to the io-pgtable code.
-	 */
-	domain->cfg.coherent_walk = false;
-	domain->cfg.iommu_dev = domain->mmu->root->dev;
-
-	/*
-	 * Find an unused context.
-	 */
-	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
-	if (ret < 0)
-		return ret;
-
-	domain->context_id = ret;
-
-	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
-					   domain);
-	if (!domain->iop) {
-		ipmmu_domain_free_context(domain->mmu->root,
-					  domain->context_id);
-		return -EINVAL;
-	}
 
 	/* TTBR0 */
 	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
@@ -495,7 +459,55 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	 */
 	ipmmu_ctx_write_all(domain, IMCTR,
 			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
+}
+
+static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
+{
+	int ret;
+
+	/*
+	 * Allocate the page table operations.
+	 *
+	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
+	 * access, Long-descriptor format" that the NStable bit being set in a
+	 * table descriptor will result in the NStable and NS bits of all child
+	 * entries being ignored and considered as being set. The IPMMU seems
+	 * not to comply with this, as it generates a secure access page fault
+	 * if any of the NStable and NS bits isn't set when running in
+	 * non-secure mode.
+	 */
+	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
+	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+	domain->cfg.ias = 32;
+	domain->cfg.oas = 40;
+	domain->cfg.tlb = &ipmmu_gather_ops;
+	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+	domain->io_domain.geometry.force_aperture = true;
+
+	/*
+	 * TODO: Add support for coherent walk through CCI with DVM and remove
+	 * cache handling. For now, delegate it to the io-pgtable code.
+	 */
+	domain->cfg.coherent_walk = false;
+	domain->cfg.iommu_dev = domain->mmu->root->dev;
+
+	/*
+	 * Find an unused context.
+	 */
+	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
+	if (ret < 0)
+		return ret;
+
+	domain->context_id = ret;
+
+	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
+					   domain);
+	if (!domain->iop) {
+		ipmmu_domain_free_context(domain->mmu->root,
+					  domain->context_id);
+		return -EINVAL;
+	}
+
+	ipmmu_domain_setup_context(domain);
 
 	return 0;
 }
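Taken together, the two hunks above split context handling into one-time allocation and replayable register programming: ipmmu_domain_init_context() still claims a free context and builds the io-pgtable, while every hardware write now lives in ipmmu_domain_setup_context(). A rough, illustrative sketch of the resulting call flow (not code from the patch):

	/*
	 * Domain attach:
	 *	ipmmu_domain_init_context()
	 *		-> ipmmu_domain_allocate_context()  (claim a context)
	 *		-> alloc_io_pgtable_ops()           (page tables in RAM)
	 *		-> ipmmu_domain_setup_context()     (context registers)
	 *
	 * System resume (added at the end of this diff):
	 *	ipmmu_resume_noirq()
	 *		-> ipmmu_domain_setup_context()     (registers only; the
	 *		    page tables survived suspend in RAM)
	 */

This split is what lets the resume handler restore a live context without touching the allocator or the page tables.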
@@ -523,14 +535,16 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 {
 	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
 	struct ipmmu_vmsa_device *mmu = domain->mmu;
+	unsigned long iova;
 	u32 status;
-	u32 iova;
 
 	status = ipmmu_ctx_read_root(domain, IMSTR);
 	if (!(status & err_mask))
 		return IRQ_NONE;
 
-	iova = ipmmu_ctx_read_root(domain, IMEAR);
+	iova = ipmmu_ctx_read_root(domain, IMELAR);
+	if (IS_ENABLED(CONFIG_64BIT))
+		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
 
 	/*
 	 * Clear the error status flags. Unlike traditional interrupt flag
@@ -542,10 +556,10 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 
 	/* Log fatal errors. */
 	if (status & IMSTR_MHIT)
-		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
+		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
 				    iova);
 	if (status & IMSTR_ABORT)
-		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
+		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
 				    iova);
 
 	if (!(status & (IMSTR_PF | IMSTR_TF)))
@@ -561,7 +575,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
 		return IRQ_HANDLED;
 
 	dev_err_ratelimited(mmu->dev,
-			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
+			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
 			    status, iova);
 
 	return IRQ_HANDLED;
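For context on the IMELAR/IMEUAR rework above: iova is widened to unsigned long and the log formats move from %08x to %lx because R-Car Gen3 latches a 40-bit fault address split across two 32-bit registers. The IS_ENABLED(CONFIG_64BIT) guard keeps the IMEUAR read out of 32-bit builds, where that register does not exist and unsigned long could not hold the upper bits anyway. A worked example with made-up values:

	/* Illustrative only: recombining a Gen3 fault address. */
	static unsigned long example_fault_iova(void)
	{
		u32 lo = 0x34567000;	/* IMELAR: address bits [31:0]  */
		u32 hi = 0x00000012;	/* IMEUAR: address bits [39:32] */
		unsigned long iova = lo;

		if (IS_ENABLED(CONFIG_64BIT))
			iova |= (u64)hi << 32;

		return iova;		/* 0x1234567000 on a 64-bit kernel */
	}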
@@ -886,27 +900,37 @@ error:
 static int ipmmu_add_device(struct device *dev)
 {
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
 	struct iommu_group *group;
+	int ret;
 
 	/*
 	 * Only let through devices that have been verified in xlate()
 	 */
-	if (!to_ipmmu(dev))
+	if (!mmu)
 		return -ENODEV;
 
-	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
-		return ipmmu_init_arm_mapping(dev);
+	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)) {
+		ret = ipmmu_init_arm_mapping(dev);
+		if (ret)
+			return ret;
+	} else {
+		group = iommu_group_get_for_dev(dev);
+		if (IS_ERR(group))
+			return PTR_ERR(group);
 
-	group = iommu_group_get_for_dev(dev);
-	if (IS_ERR(group))
-		return PTR_ERR(group);
+		iommu_group_put(group);
+	}
 
-	iommu_group_put(group);
+	iommu_device_link(&mmu->iommu, dev);
 
 	return 0;
 }
 
 static void ipmmu_remove_device(struct device *dev)
 {
+	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
+
+	iommu_device_unlink(&mmu->iommu, dev);
 	arm_iommu_detach_device(dev);
 	iommu_group_remove_device(dev);
 }
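The other change in these two functions is sysfs wiring: iommu_device_link() adds the device to the IOMMU's devices/ directory under /sys/class/iommu/ (with a matching back-link on the device side), and iommu_device_unlink() removes it again. Because the link is now created after either branch, it covers the 32-bit ARM mapping path as well as the iommu_group path.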
@@ -960,6 +984,7 @@ static const struct ipmmu_features ipmmu_features_default = {
 	.use_ns_alias_offset = true,
 	.has_cache_leaf_nodes = false,
 	.number_of_contexts = 1, /* software only tested with one context */
+	.num_utlbs = 32,
 	.setup_imbuscr = true,
 	.twobit_imttbcr_sl0 = false,
 	.reserved_context = false,
@@ -969,6 +994,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
 	.use_ns_alias_offset = false,
 	.has_cache_leaf_nodes = true,
 	.number_of_contexts = 8,
+	.num_utlbs = 48,
 	.setup_imbuscr = false,
 	.twobit_imttbcr_sl0 = true,
 	.reserved_context = true,
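These num_utlbs values replace a constant that probe() used to hard-code for every SoC (see the next hunk). As a reminder of how the feature structs are selected, here is a minimal sketch of the existing OF match wiring; the compatible strings are real, but the excerpt itself is illustrative and not part of this diff:

	static const struct of_device_id ipmmu_of_ids[] = {
		{
			.compatible = "renesas,ipmmu-vmsa",	/* R-Car Gen2 */
			.data = &ipmmu_features_default,
		}, {
			.compatible = "renesas,ipmmu-r8a7795",	/* R-Car Gen3 */
			.data = &ipmmu_features_rcar_gen3,
		}, {
			/* Terminator */
		},
	};

probe() then picks the struct up with of_device_get_match_data(), visible in the hunk below.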
@@ -1021,10 +1047,10 @@ static int ipmmu_probe(struct platform_device *pdev)
 	}
 
 	mmu->dev = &pdev->dev;
-	mmu->num_utlbs = 48;
 	spin_lock_init(&mmu->lock);
 	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
 	mmu->features = of_device_get_match_data(&pdev->dev);
+	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
 	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
 
 	/* Map I/O memory and request IRQ. */
@@ -1048,8 +1074,7 @@ static int ipmmu_probe(struct platform_device *pdev)
 	if (mmu->features->use_ns_alias_offset)
 		mmu->base += IM_NS_ALIAS_OFFSET;
 
-	mmu->num_ctx = min_t(unsigned int, IPMMU_CTX_MAX,
-			     mmu->features->number_of_contexts);
+	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
 
 	irq = platform_get_irq(pdev, 0);
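Two hedged notes on why this hunk and the probe hunk above are correct, sketched as editorial comments rather than taken from the patch:

	/*
	 * Editorial sketch, not from the patch:
	 *
	 * min() type-checks its operands at compile time. The features
	 * field number_of_contexts is unsigned int, so the old plain-int
	 * "#define IPMMU_CTX_MAX 8" forced min_t(unsigned int, ...);
	 * defining it as 8U makes both operands unsigned int and lets
	 * plain min() build cleanly.
	 *
	 * The memset() in the probe hunk works because memset() stores
	 * its value byte by byte: IPMMU_CTX_INVALID (-1) becomes 0xff in
	 * each utlb_ctx[] slot, which is only correct because
	 * sizeof(s8) == 1.
	 */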
@@ -1141,10 +1166,48 @@ static int ipmmu_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ipmmu_resume_noirq(struct device *dev)
+{
+	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
+	unsigned int i;
+
+	/* Reset root MMU and restore contexts */
+	if (ipmmu_is_root(mmu)) {
+		ipmmu_device_reset(mmu);
+
+		for (i = 0; i < mmu->num_ctx; i++) {
+			if (!mmu->domains[i])
+				continue;
+
+			ipmmu_domain_setup_context(mmu->domains[i]);
+		}
+	}
+
+	/* Re-enable active micro-TLBs */
+	for (i = 0; i < mmu->features->num_utlbs; i++) {
+		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
+			continue;
+
+		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops ipmmu_pm = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+};
+#define DEV_PM_OPS	&ipmmu_pm
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver ipmmu_driver = {
 	.driver = {
 		.name = "ipmmu-vmsa",
 		.of_match_table = of_match_ptr(ipmmu_of_ids),
+		.pm = DEV_PM_OPS,
 	},
 	.probe = ipmmu_probe,
 	.remove = ipmmu_remove,
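A closing hedged note on the PM glue: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills in the noirq callbacks for both the suspend and hibernation transitions, so with NULL on the suspend side the structure is roughly equivalent to the sketch below (see include/linux/pm.h for the exact expansion). No suspend hook is needed because utlb_ctx[] and domains[] already track, at runtime, everything the resume handler must reprogram.

	/* Approximate expansion, for illustration only: */
	static const struct dev_pm_ops ipmmu_pm = {
		.resume_noirq	= ipmmu_resume_noirq,	/* wake from suspend */
		.thaw_noirq	= ipmmu_resume_noirq,	/* after hibernation image */
		.restore_noirq	= ipmmu_resume_noirq,	/* wake from hibernation */
		/* all suspend-side noirq hooks stay NULL */
	};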