From ffcb6d1686ceb4a6b50776fb2597ab0e4dd79040 Mon Sep 17 00:00:00 2001
From: Robin Murphy
Date: Thu, 17 Sep 2015 17:42:16 +0100
Subject: [PATCH 1/6] iommu/io-pgtable-arm: Don't use dma_to_phys()

In checking whether DMA addresses differ from physical addresses, using
dma_to_phys() is actually the wrong thing to do, since it may hide any
DMA offset, which is precisely one of the things we are checking for.
Simply casting between the two address types, whilst ugly, is in fact
the appropriate course of action. Further care (and ugliness) is also
necessary in the comparison to avoid truncation if phys_addr_t and
dma_addr_t differ in size.

We can also reject any device with a fixed DMA offset up-front at page
table creation, leaving the allocation-time check for the more subtle
cases like bounce buffering due to an incorrect DMA mask.

Furthermore, we can then fix the hackish Kconfig dependency so that
architectures without a dma_to_phys() implementation may still
COMPILE_TEST (or even use!) the code. The true dependency is on the
DMA API, so use the appropriate symbol for that.

Signed-off-by: Robin Murphy
[will: folded in selftest fix from Yong Wu]
Signed-off-by: Will Deacon
---
 drivers/iommu/Kconfig          |  3 +--
 drivers/iommu/io-pgtable-arm.c | 24 +++++++++++++-----------
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a96c67..3dc1bcb0d01d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
 	bool "ARMv7/v8 Long Descriptor Format"
 	select IOMMU_IO_PGTABLE
-	# SWIOTLB guarantees a dma_to_phys() implementation
-	depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
+	depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
 	help
 	  Enable support for the ARM long descriptor pagetable format.
 	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 73c07482f487..7df97777662d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
-static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
-	return phys_to_dma(dev, virt_to_phys(pages));
+	return (dma_addr_t)virt_to_phys(pages);
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
@@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 			goto out_free;
 		/*
 		 * We depend on the IOMMU being able to work with any physical
-		 * address directly, so if the DMA layer suggests it can't by
-		 * giving us back some translation, that bodes very badly...
+		 * address directly, so if the DMA layer suggests otherwise by
+		 * translating or truncating them, that bodes very badly...
 		 */
-		if (dma != __arm_lpae_dma_addr(dev, pages))
+		if (dma != virt_to_phys(pages))
 			goto out_unmap;
 	}
 
@@ -243,10 +243,8 @@ out_free:
 static void __arm_lpae_free_pages(void *pages, size_t size,
 				  struct io_pgtable_cfg *cfg)
 {
-	struct device *dev = cfg->iommu_dev;
-
 	if (!selftest_running)
-		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
 	free_pages_exact(pages, size);
 }
@@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 			       struct io_pgtable_cfg *cfg)
 {
-	struct device *dev = cfg->iommu_dev;
-
 	*ptep = pte;
 
 	if (!selftest_running)
-		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+		dma_sync_single_for_device(cfg->iommu_dev,
+					   __arm_lpae_dma_addr(ptep),
 					   sizeof(pte), DMA_TO_DEVICE);
 }
 
@@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
 		return NULL;
 
+	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
+		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
+		return NULL;
+	}
+
 	data = kmalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return NULL;
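
To see why the allocation-time check compares against virt_to_phys()
directly rather than going through the casting helper, consider a
configuration where dma_addr_t is narrower than phys_addr_t. The
following userspace sketch (toy typedefs, not kernel code; a 32-bit
DMA address type is an assumption for illustration) shows the direct
comparison promoting both operands to the wider type and catching the
truncation, while casting the physical address down first would hide
it:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t dma_addr_t;	/* assume a 32-bit DMA address type */
	typedef uint64_t phys_addr_t;	/* assume a 64-bit physical address type */

	int main(void)
	{
		phys_addr_t phys = 0x100000000ULL;	/* a page above 4GB */
		dma_addr_t dma = (dma_addr_t)phys;	/* truncated handle: 0 */

		/* direct comparison promotes dma to 64 bits: mismatch caught */
		printf("dma != phys             -> %d\n", dma != phys);

		/* casting phys down truncates it too: mismatch hidden */
		printf("dma != (dma_addr_t)phys -> %d\n", dma != (dma_addr_t)phys);

		return 0;
	}

The first comparison prints 1 (caught) and the second 0 (hidden),
which is exactly the distinction the commit message draws between the
comparison and the unmap/sync helper.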
From f0c453dbcce7767cd868deb809ba68083c93954e Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 20 Aug 2015 12:12:32 +0100
Subject: [PATCH 2/6] iommu/arm-smmu: Ensure IAS is set correctly for
 AArch32-capable SMMUs

AArch32-capable SMMU implementations have a minimum IAS of 40 bits, so
ensure that is reflected in the stage-2 page table configuration.

Signed-off-by: Will Deacon
---
 drivers/iommu/arm-smmu-v3.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index dafaf59dc3b8..a24f359fa0d0 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -56,6 +56,7 @@
 #define IDR0_TTF_SHIFT			2
 #define IDR0_TTF_MASK			0x3
 #define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
+#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
 #define IDR0_S1P			(1 << 1)
 #define IDR0_S2P			(1 << 0)
 
@@ -2460,7 +2461,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
 	}
 
 	/* We only support the AArch64 table format at present */
-	if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) {
+	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
+	case IDR0_TTF_AARCH32_64:
+		smmu->ias = 40;
+		/* Fallthrough */
+	case IDR0_TTF_AARCH64:
+		break;
+	default:
 		dev_err(smmu->dev, "AArch64 table format not supported!\n");
 		return -ENXIO;
 	}
@@ -2541,8 +2548,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
 		dev_warn(smmu->dev,
 			 "failed to set DMA mask for table walker\n");
 
-	if (!smmu->ias)
-		smmu->ias = smmu->oas;
+	smmu->ias = max(smmu->ias, smmu->oas);
 
 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
 		 smmu->ias, smmu->oas, smmu->features);
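
The new IDR0.TTF decode can be exercised on its own. A minimal sketch
reusing the defines from the patch (the decode_ttf() wrapper and the
main() plumbing are purely illustrative, not driver code); note that
'&' binds looser than '<<', so the expression masks the field in place
before matching it against the shifted encodings:

	#include <stdio.h>

	#define IDR0_TTF_SHIFT		2
	#define IDR0_TTF_MASK		0x3
	#define IDR0_TTF_AARCH64	(2 << IDR0_TTF_SHIFT)
	#define IDR0_TTF_AARCH32_64	(3 << IDR0_TTF_SHIFT)

	static int decode_ttf(unsigned int reg, unsigned long *ias)
	{
		switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
		case IDR0_TTF_AARCH32_64:
			*ias = 40;	/* AArch32-capable: at least 40-bit IAS */
			/* Fallthrough: AArch64 tables supported either way */
		case IDR0_TTF_AARCH64:
			return 0;
		default:
			return -1;	/* AArch64 table format not supported */
		}
	}

	int main(void)
	{
		unsigned long ias = 0;

		printf("TTF=3: %d, ias=%lu\n", decode_ttf(3 << 2, &ias), ias);
		printf("TTF=2: %d\n", decode_ttf(2 << 2, &ias));
		printf("TTF=1: %d\n", decode_ttf(1 << 2, &ias));
		return 0;
	}

Only the AArch32-capable encoding pins ias to 40 bits; a pure AArch64
implementation leaves it untouched, to be filled in from OAS later.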
From 1c27df1c0a82b938d8073a60243ff62eff8056b5 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 18 Sep 2015 16:12:56 +0100
Subject: [PATCH 3/6] iommu/arm-smmu: Use correct address mask for
 CMD_TLBI_S2_IPA

Stage-2 TLBI by IPA takes a 48-bit address field, as opposed to the
64-bit field used by the VA-based invalidation commands. This patch
re-jigs the SMMUv3 command construction code so that the address field
is correctly masked.

Signed-off-by: Will Deacon
---
 drivers/iommu/arm-smmu-v3.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a24f359fa0d0..286e890e7d64 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -343,7 +343,8 @@
 #define CMDQ_TLBI_0_VMID_SHIFT		32
 #define CMDQ_TLBI_0_ASID_SHIFT		48
 #define CMDQ_TLBI_1_LEAF		(1UL << 0)
-#define CMDQ_TLBI_1_ADDR_MASK		~0xfffUL
+#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
+#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL
 
 #define CMDQ_PRI_0_SSID_SHIFT		12
 #define CMDQ_PRI_0_SSID_MASK		0xfffffUL
@@ -771,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 		break;
 	case CMDQ_OP_TLBI_NH_VA:
 		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
-		/* Fallthrough */
+		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+		break;
 	case CMDQ_OP_TLBI_S2_IPA:
 		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
 		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
-		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
+		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
 		break;
 	case CMDQ_OP_TLBI_NH_ASID:
 		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
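
The two masks differ only above bit 47: both clear the low 12
page-offset bits, but the IPA mask additionally truncates to 48 bits.
A quick standalone check with an arbitrary example address (the
address value is made up for illustration; assumes a 64-bit unsigned
long, as the UL-suffixed defines do):

	#include <stdio.h>

	#define CMDQ_TLBI_1_VA_MASK	~0xfffUL
	#define CMDQ_TLBI_1_IPA_MASK	0xfffffffff000UL

	int main(void)
	{
		/* high bits set and a non-zero page offset */
		unsigned long addr = 0xffff000012345678UL;

		/* VA commands keep bits [63:12] */
		printf("VA:  0x%016lx\n", addr & CMDQ_TLBI_1_VA_MASK);

		/* IPA commands keep only bits [47:12] */
		printf("IPA: 0x%016lx\n", addr & CMDQ_TLBI_1_IPA_MASK);
		return 0;
	}

This prints 0xffff000012345000 for the VA mask but 0x0000000012345000
for the IPA mask, matching the 48-bit command field.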
From 499f3aa4323775d5320bdf7ccc26576c54a54169 Mon Sep 17 00:00:00 2001
From: Sudip Mukherjee
Date: Fri, 18 Sep 2015 16:27:07 +0530
Subject: [PATCH 4/6] iommu/vt-d: Fix memory leak in dmar_insert_one_dev_info()

We return NULL when we are not able to attach the iommu to the domain,
but on that error path we missed freeing 'info'.

Signed-off-by: Sudip Mukherjee
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a3ee14..da99310c1b25 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2301,6 +2301,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
 	if (ret) {
 		spin_unlock_irqrestore(&device_domain_lock, flags);
+		free_devinfo_mem(info);
 		return NULL;
 	}
 
From cbbc00be2ce3af5d708226a9563fa27cb4e6b6b8 Mon Sep 17 00:00:00 2001
From: Jiang Liu
Date: Fri, 9 Oct 2015 22:07:31 +0800
Subject: [PATCH 5/6] iommu/amd: Prevent binding other PCI drivers to IOMMU
 PCI devices

The AMD IOMMU driver makes use of the IOMMU PCI devices itself, so
prevent other PCI drivers from binding to them.

This fixes a bug reported by Boris where system suspend/resume gets
broken on AMD platforms. For more information, please refer to:
https://lkml.org/lkml/2015/9/26/89

Fixes: 991de2e59090 ("PCI, x86: Implement pcibios_alloc_irq() and pcibios_free_irq()")
Signed-off-by: Jiang Liu
Cc: Borislav Petkov
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu_init.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5ef347a13cb5..1b066e7d144d 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 	if (!iommu->dev)
 		return -ENODEV;
 
+	/* Prevent binding other PCI device drivers to IOMMU devices */
+	iommu->dev->match_driver = false;
+
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
 			      &iommu->cap);
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
From 5adad9915472e180712030d730cdc476c6f8a60b Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Fri, 9 Oct 2015 16:23:33 +0200
Subject: [PATCH 6/6] iommu/amd: Fix NULL pointer deref on device detach

When a device group is detached from its domain, the iommu core code
calls into the iommu driver to detach each device individually.

Before this functionality went into the iommu core code, it was
implemented in the drivers, also in the AMD IOMMU driver as the device
alias handling code. This code is still present, as there might be
aliases that don't exist as real PCI devices (and are therefore
invisible to the iommu core code).

Unfortunately it can now happen that a device is unbound from its
domain multiple times, first by the alias handling code and then by
the iommu core code (or vice versa). This ends up in the do_detach
function, which dereferences the dev_data->domain pointer. When the
device is already detached, this pointer is NULL and we get a kernel
oops.

Removing the alias code completely is not an option, as that would
also remove the code which handles invisible aliases. The code could
be simplified, but this is too big of a change outside the merge
window. For now, just check the dev_data->domain pointer in do_detach
and bail out if it is NULL.

Reported-by: Andreas Hartmann
Signed-off-by: Joerg Roedel
---
 drivers/iommu/amd_iommu.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index f82060e778a2..08d2775887f7 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
 {
 	struct amd_iommu *iommu;
 
+	/*
+	 * First check if the device is still attached. It might already
+	 * be detached from its domain because the generic
+	 * iommu_detach_group code detached it and we try again here in
+	 * our alias handling.
+	 */
+	if (!dev_data->domain)
+		return;
+
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
 	/* decrease reference counters */
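
Reduced to its essentials, the oops is an unguarded detach running
twice on the same device. A stripped-down sketch (hypothetical minimal
structures, reusing only the names from the patch; not the driver's
real types) of how the guard turns the second call into a no-op:

	#include <stdio.h>

	struct domain { int dev_cnt; };
	struct dev_data { struct domain *domain; };

	static void do_detach(struct dev_data *dev_data)
	{
		/* the patched guard: already detached, so bail out */
		if (!dev_data->domain)
			return;

		dev_data->domain->dev_cnt--;
		dev_data->domain = NULL;	/* would be NULL-deref'd on re-entry */
	}

	int main(void)
	{
		struct domain dom = { .dev_cnt = 1 };
		struct dev_data dd = { .domain = &dom };

		do_detach(&dd);	/* alias handling path detaches first */
		do_detach(&dd);	/* iommu core path: now a no-op, not an oops */
		printf("dev_cnt = %d\n", dom.dev_cnt);	/* 0: dropped exactly once */
		return 0;
	}

Without the guard, the second call dereferences the NULL domain
pointer, which is the crash described in the commit message.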