iommu/vt-d: Cleanup log messages

Give all VT-d log messages a common prefix that can be
grepped for, and improve the wording here and there.

Tested-by: ZhenHua Li <zhen-hual@hp.com>
Tested-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Author: Joerg Roedel <jroedel@suse.de>
Date:   2015-06-12 09:57:06 +02:00
Commit: 9f10e5bf62 (parent: 0f57d86787)
3 changed files with 101 additions and 110 deletions
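
Background for the cleanup: the kernel expands every pr_info()/pr_warn()/pr_err()
call through the pr_fmt() macro, so a single pr_fmt() define at the top of a file
(it has to precede printk.h, as the comment on the old dmar.c define notes)
prefixes every message that file emits. A minimal user-space sketch of the
pattern, with printf() standing in for printk() and the message strings borrowed
from the diff below:

    #include <stdio.h>

    /* Stand-in for the kernel's pr_fmt()/pr_info() pairing: pr_fmt() must be
     * defined before the printing macros are used; each pr_*() call then
     * picks up the prefix at compile time. */
    #define pr_fmt(fmt) "DMAR: " fmt
    #define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_info("No RMRR found\n");             /* "DMAR: No RMRR found" */
            pr_info("Failed to map %s\n", "dmar0"); /* "DMAR: Failed to map dmar0" */
            return 0;
    }

With every VT-d message carrying the same prefix, something like
dmesg | grep 'DMAR' now catches all of them, which is the grep-ability the
changelog refers to.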

--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -26,7 +26,7 @@
  * These routines are used by both DMA-remapping and Interrupt-remapping
  */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+#define pr_fmt(fmt) "DMAR: " fmt
 #include <linux/pci.h>
 #include <linux/dmar.h>
@@ -555,7 +555,7 @@ static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                        break;
                } else if (next > end) {
                        /* Avoid passing table end */
-                       pr_warn(FW_BUG "record passes table end\n");
+                       pr_warn(FW_BUG "Record passes table end\n");
                        ret = -EINVAL;
                        break;
                }
@@ -802,7 +802,7 @@ int __init dmar_table_init(void)
        ret = parse_dmar_table();
        if (ret < 0) {
                if (ret != -ENODEV)
-                       pr_info("parse DMAR table failure.\n");
+                       pr_info("Parse DMAR table failure.\n");
        } else if (list_empty(&dmar_drhd_units)) {
                pr_info("No DMAR devices found\n");
                ret = -ENODEV;
@@ -847,7 +847,7 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
        else
                addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
        if (!addr) {
-               pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+               pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
                return -EINVAL;
        }
@@ -921,14 +921,14 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
        iommu->reg_size = VTD_PAGE_SIZE;
        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
-               pr_err("IOMMU: can't reserve memory\n");
+               pr_err("Can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }
        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
-               pr_err("IOMMU: can't map the region\n");
+               pr_err("Can't map the region\n");
                err = -ENOMEM;
                goto release;
        }
@@ -952,13 +952,13 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
-                       pr_err("IOMMU: can't reserve memory\n");
+                       pr_err("Can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
-                       pr_err("IOMMU: can't map the region\n");
+                       pr_err("Can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
@@ -1014,14 +1014,14 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
                return -ENOMEM;
        if (dmar_alloc_seq_id(iommu) < 0) {
-               pr_err("IOMMU: failed to allocate seq_id\n");
+               pr_err("Failed to allocate seq_id\n");
                err = -ENOSPC;
                goto error;
        }
        err = map_iommu(iommu, drhd->reg_base_addr);
        if (err) {
-               pr_err("IOMMU: failed to map %s\n", iommu->name);
+               pr_err("Failed to map %s\n", iommu->name);
                goto error_free_seq_id;
        }
@@ -1045,8 +1045,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        iommu->node = -1;
        ver = readl(iommu->reg + DMAR_VER_REG);
-       pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
-               iommu->seq_id,
+       pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
+               iommu->name,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
@@ -1644,7 +1644,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
        irq = dmar_alloc_hwirq();
        if (irq <= 0) {
-               pr_err("IOMMU: no free vectors\n");
+               pr_err("No free IRQ vectors\n");
                return -EINVAL;
        }
@@ -1661,7 +1661,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
-               pr_err("IOMMU: can't request irq\n");
+               pr_err("Can't request irq\n");
        return ret;
 }

--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -15,8 +15,11 @@
  * Shaohua Li <shaohua.li@intel.com>,
  * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
  * Fenghua Yu <fenghua.yu@intel.com>
+ * Joerg Roedel <jroedel@suse.de>
  */
 
+#define pr_fmt(fmt) "DMAR: " fmt
+
 #include <linux/init.h>
 #include <linux/bitmap.h>
 #include <linux/debugfs.h>
@@ -453,25 +456,21 @@ static int __init intel_iommu_setup(char *str)
        while (*str) {
                if (!strncmp(str, "on", 2)) {
                        dmar_disabled = 0;
-                       printk(KERN_INFO "Intel-IOMMU: enabled\n");
+                       pr_info("IOMMU enabled\n");
                } else if (!strncmp(str, "off", 3)) {
                        dmar_disabled = 1;
-                       printk(KERN_INFO "Intel-IOMMU: disabled\n");
+                       pr_info("IOMMU disabled\n");
                } else if (!strncmp(str, "igfx_off", 8)) {
                        dmar_map_gfx = 0;
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable GFX device mapping\n");
+                       pr_info("Disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: Forcing DAC for PCI devices\n");
+                       pr_info("Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
                } else if (!strncmp(str, "strict", 6)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable batched IOTLB flush\n");
+                       pr_info("Disable batched IOTLB flush\n");
                        intel_iommu_strict = 1;
                } else if (!strncmp(str, "sp_off", 6)) {
-                       printk(KERN_INFO
-                               "Intel-IOMMU: disable supported super page\n");
+                       pr_info("Disable supported super page\n");
                        intel_iommu_superpage = 0;
                } else if (!strncmp(str, "ecs_off", 7)) {
                        printk(KERN_INFO
@@ -1132,7 +1131,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
        root = (struct root_entry *)alloc_pgtable_page(iommu->node);
        if (!root) {
-               pr_err("IOMMU: allocating root entry for %s failed\n",
+               pr_err("Allocating root entry for %s failed\n",
                        iommu->name);
                return -ENOMEM;
        }
@@ -1270,9 +1269,9 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
        /* check IOTLB invalidation granularity */
        if (DMA_TLB_IAIG(val) == 0)
-               printk(KERN_ERR"IOMMU: flush IOTLB failed\n");
+               pr_err("Flush IOTLB failed\n");
        if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
-               pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
+               pr_debug("TLB flush request %Lx, actual %Lx\n",
                        (unsigned long long)DMA_TLB_IIRG(type),
                        (unsigned long long)DMA_TLB_IAIG(val));
 }
@@ -1443,8 +1442,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
        unsigned long nlongs;
        ndomains = cap_ndoms(iommu->cap);
-       pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
-                iommu->seq_id, ndomains);
+       pr_debug("%s: Number of Domains supported <%ld>\n",
+                iommu->name, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
        spin_lock_init(&iommu->lock);
@@ -1454,15 +1453,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
         */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
-               pr_err("IOMMU%d: allocating domain id array failed\n",
-                       iommu->seq_id);
+               pr_err("%s: Allocating domain id array failed\n",
+                       iommu->name);
                return -ENOMEM;
        }
        iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
                        GFP_KERNEL);
        if (!iommu->domains) {
-               pr_err("IOMMU%d: allocating domain array failed\n",
-                       iommu->seq_id);
+               pr_err("%s: Allocating domain array failed\n",
+                       iommu->name);
                kfree(iommu->domain_ids);
                iommu->domain_ids = NULL;
                return -ENOMEM;
@@ -1567,7 +1566,7 @@ static int iommu_attach_domain(struct dmar_domain *domain,
        num = __iommu_attach_domain(domain, iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);
        if (num < 0)
-               pr_err("IOMMU: no free domain ids\n");
+               pr_err("%s: No free domain ids\n", iommu->name);
        return num;
 }
@@ -1659,7 +1658,7 @@ static int dmar_init_reserved_ranges(void)
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
                IOVA_PFN(IOAPIC_RANGE_END));
        if (!iova) {
-               printk(KERN_ERR "Reserve IOAPIC range failed\n");
+               pr_err("Reserve IOAPIC range failed\n");
                return -ENODEV;
        }
@@ -1675,7 +1674,7 @@ static int dmar_init_reserved_ranges(void)
                                            IOVA_PFN(r->start),
                                            IOVA_PFN(r->end));
                        if (!iova) {
-                               printk(KERN_ERR "Reserve iova failed\n");
+                               pr_err("Reserve iova failed\n");
                                return -ENODEV;
                        }
                }
@@ -1722,7 +1721,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        sagaw = cap_sagaw(iommu->cap);
        if (!test_bit(agaw, &sagaw)) {
                /* hardware doesn't support it, choose a bigger one */
-               pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw);
+               pr_debug("Hardware doesn't support agaw %d\n", agaw);
                agaw = find_next_bit(&sagaw, 5, agaw);
                if (agaw >= 5)
                        return -ENODEV;
@@ -1823,7 +1822,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                        id = iommu_attach_vm_domain(domain, iommu);
                        if (id < 0) {
                                spin_unlock_irqrestore(&iommu->lock, flags);
-                               pr_err("IOMMU: no free domain ids\n");
+                               pr_err("%s: No free domain ids\n", iommu->name);
                                return -EFAULT;
                        }
                }
@@ -2050,8 +2049,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
                if (tmp) {
                        static int dumps = 5;
-                       printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
+                       pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
                                iov_pfn, tmp, (unsigned long long)pteval);
                        if (dumps) {
                                dumps--;
                                debug_dma_dump_mappings(NULL);
@@ -2323,7 +2322,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
        if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
                          dma_to_mm_pfn(last_vpfn))) {
-               printk(KERN_ERR "IOMMU: reserve iova failed\n");
+               pr_err("Reserving iova failed\n");
                return -ENOMEM;
        }
@@ -2356,15 +2355,14 @@ static int iommu_prepare_identity_map(struct device *dev,
           range which is reserved in E820, so which didn't get set
           up to start with in si_domain */
        if (domain == si_domain && hw_pass_through) {
-               printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+               pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
                        dev_name(dev), start, end);
                return 0;
        }
-       printk(KERN_INFO
-              "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
-              dev_name(dev), start, end);
+       pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+               dev_name(dev), start, end);
        if (end < start) {
                WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -2421,12 +2419,11 @@ static inline void iommu_prepare_isa(void)
        if (!pdev)
                return;
-       printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
+       pr_info("Prepare 0-16MiB unity mapping for LPC\n");
        ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
        if (ret)
-               printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
-                      "floppy might not work\n");
+               pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
        pci_dev_put(pdev);
 }
@@ -2470,7 +2467,7 @@ static int __init si_domain_init(int hw)
                return -EFAULT;
        }
-       pr_debug("IOMMU: identity mapping domain is domain %d\n",
+       pr_debug("Identity mapping domain is domain %d\n",
                 si_domain->id);
        if (hw)
@@ -2670,8 +2667,8 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
                                  hw ? CONTEXT_TT_PASS_THROUGH :
                                       CONTEXT_TT_MULTI_LEVEL);
        if (!ret)
-               pr_info("IOMMU: %s identity mapping for device %s\n",
-                       hw ? "hardware" : "software", dev_name(dev));
+               pr_info("%s identity mapping for device %s\n",
+                       hw ? "Hardware" : "Software", dev_name(dev));
        else if (ret == -ENODEV)
                /* device not associated with an iommu */
                ret = 0;
@@ -2748,12 +2745,12 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
                 */
                iommu->flush.flush_context = __iommu_flush_context;
                iommu->flush.flush_iotlb = __iommu_flush_iotlb;
-               pr_info("IOMMU: %s using Register based invalidation\n",
+               pr_info("%s: Using Register based invalidation\n",
                        iommu->name);
        } else {
                iommu->flush.flush_context = qi_flush_context;
                iommu->flush.flush_iotlb = qi_flush_iotlb;
-               pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+               pr_info("%s: Using Queued invalidation\n", iommu->name);
        }
 }
@@ -2781,8 +2778,7 @@ static int __init init_dmars(void)
                        g_num_of_iommus++;
                        continue;
                }
-               printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
-                           DMAR_UNITS_SUPPORTED);
+               pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
        }
        /* Preallocate enough resources for IOMMU hot-addition */
@@ -2792,7 +2788,7 @@ static int __init init_dmars(void)
        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
                        GFP_KERNEL);
        if (!g_iommus) {
-               printk(KERN_ERR "Allocating global iommu array failed\n");
+               pr_err("Allocating global iommu array failed\n");
                ret = -ENOMEM;
                goto error;
        }
@@ -2843,7 +2839,7 @@ static int __init init_dmars(void)
        if (iommu_identity_mapping) {
                ret = iommu_prepare_static_identity_mapping(hw_pass_through);
                if (ret) {
-                       printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+                       pr_crit("Failed to setup IOMMU pass-through\n");
                        goto free_iommu;
                }
        }
@@ -2861,15 +2857,14 @@ static int __init init_dmars(void)
         *    endfor
         * endfor
         */
-       printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+       pr_info("Setting RMRR:\n");
        for_each_rmrr_units(rmrr) {
                /* some BIOS lists non-exist devices in DMAR table. */
                for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
                                          i, dev) {
                        ret = iommu_prepare_rmrr_dev(rmrr, dev);
                        if (ret)
-                               printk(KERN_ERR
-                                      "IOMMU: mapping reserved region failed\n");
+                               pr_err("Mapping reserved region failed\n");
                }
        }
@@ -2944,7 +2939,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
        }
        iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
        if (unlikely(!iova)) {
-               printk(KERN_ERR "Allocating %ld-page iova for %s failed",
+               pr_err("Allocating %ld-page iova for %s failed",
                       nrpages, dev_name(dev));
                return NULL;
        }
@@ -2959,7 +2954,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
-               printk(KERN_ERR "Allocating domain for %s failed",
+               pr_err("Allocating domain for %s failed\n",
                       dev_name(dev));
                return NULL;
        }
@@ -2968,7 +2963,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
        if (unlikely(!domain_context_mapped(dev))) {
                ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
                if (ret) {
-                       printk(KERN_ERR "Domain context map for %s failed",
+                       pr_err("Domain context map for %s failed\n",
                               dev_name(dev));
                        return NULL;
                }
@@ -3010,8 +3005,8 @@ static int iommu_no_mapping(struct device *dev)
                         * to non-identity mapping.
                         */
                        domain_remove_one_dev_info(si_domain, dev);
-                       printk(KERN_INFO "32bit %s uses non-identity mapping\n",
+                       pr_info("32bit %s uses non-identity mapping\n",
                               dev_name(dev));
                        return 0;
                }
        } else {
@@ -3026,8 +3021,8 @@ static int iommu_no_mapping(struct device *dev)
                                                  CONTEXT_TT_PASS_THROUGH :
                                                  CONTEXT_TT_MULTI_LEVEL);
                        if (!ret) {
-                               printk(KERN_INFO "64bit %s uses identity mapping\n",
+                               pr_info("64bit %s uses identity mapping\n",
                                       dev_name(dev));
                                return 1;
                        }
                }
@@ -3096,7 +3091,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 error:
        if (iova)
                __free_iova(&domain->iovad, iova);
-       printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
+       pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
                dev_name(dev), size, (unsigned long long)paddr, dir);
        return 0;
 }
@@ -3411,7 +3406,7 @@ static inline int iommu_domain_cache_init(void)
                                         NULL);
        if (!iommu_domain_cache) {
-               printk(KERN_ERR "Couldn't create iommu_domain cache\n");
+               pr_err("Couldn't create iommu_domain cache\n");
                ret = -ENOMEM;
        }
@@ -3428,7 +3423,7 @@ static inline int iommu_devinfo_cache_init(void)
                                         SLAB_HWCACHE_ALIGN,
                                         NULL);
        if (!iommu_devinfo_cache) {
-               printk(KERN_ERR "Couldn't create devinfo cache\n");
+               pr_err("Couldn't create devinfo cache\n");
                ret = -ENOMEM;
        }
@@ -3805,19 +3800,19 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
                return 0;
        if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
-               pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+               pr_warn("%s: Doesn't support hardware pass through.\n",
                        iommu->name);
                return -ENXIO;
        }
        if (!ecap_sc_support(iommu->ecap) &&
            domain_update_iommu_snooping(iommu)) {
-               pr_warn("IOMMU: %s doesn't support snooping.\n",
+               pr_warn("%s: Doesn't support snooping.\n",
                        iommu->name);
                return -ENXIO;
        }
        sp = domain_update_iommu_superpage(iommu) - 1;
        if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
-               pr_warn("IOMMU: %s doesn't support large page.\n",
+               pr_warn("%s: Doesn't support large page.\n",
                        iommu->name);
                return -ENXIO;
        }
@@ -4048,7 +4043,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                start = mhp->start_pfn << PAGE_SHIFT;
                end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
                if (iommu_domain_identity_map(si_domain, start, end)) {
-                       pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+                       pr_warn("Failed to build identity map for [%llx-%llx]\n",
                                start, end);
                        return NOTIFY_BAD;
                }
@@ -4066,7 +4061,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                        iova = find_iova(&si_domain->iovad, start_vpfn);
                        if (iova == NULL) {
-                               pr_debug("dmar: failed get IOVA for PFN %lx\n",
+                               pr_debug("Failed get IOVA for PFN %lx\n",
                                         start_vpfn);
                                break;
                        }
@@ -4074,7 +4069,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
                        iova = split_and_remove_iova(&si_domain->iovad, iova,
                                                     start_vpfn, last_vpfn);
                        if (iova == NULL) {
-                               pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+                               pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
                                        start_vpfn, last_vpfn);
                                return NOTIFY_BAD;
                        }
@@ -4200,10 +4195,10 @@ int __init intel_iommu_init(void)
                goto out_free_dmar;
        if (list_empty(&dmar_rmrr_units))
-               printk(KERN_INFO "DMAR: No RMRR found\n");
+               pr_info("No RMRR found\n");
        if (list_empty(&dmar_atsr_units))
-               printk(KERN_INFO "DMAR: No ATSR found\n");
+               pr_info("No ATSR found\n");
        if (dmar_init_reserved_ranges()) {
                if (force_on)
@@ -4217,12 +4212,11 @@ int __init intel_iommu_init(void)
        if (ret) {
                if (force_on)
                        panic("tboot: Failed to initialize DMARs\n");
-               printk(KERN_ERR "IOMMU: dmar init failed\n");
+               pr_err("Initialization failed\n");
                goto out_free_reserved_range;
        }
        up_write(&dmar_global_lock);
-       printk(KERN_INFO
-       "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
+       pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
        init_timer(&unmap_timer);
 #ifdef CONFIG_SWIOTLB
@@ -4364,13 +4358,11 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
        dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
        if (!dmar_domain) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_init: dmar_domain == NULL\n");
+               pr_err("Can't allocate dmar_domain\n");
                return NULL;
        }
        if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
-               printk(KERN_ERR
-                       "intel_iommu_domain_init() failed\n");
+               pr_err("Domain initialization failed\n");
                domain_exit(dmar_domain);
                return NULL;
        }
@@ -4429,7 +4421,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                addr_width = cap_mgaw(iommu->cap);
        if (dmar_domain->max_addr > (1LL << addr_width)) {
-               printk(KERN_ERR "%s: iommu width (%d) is not "
+               pr_err("%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, addr_width, dmar_domain->max_addr);
                return -EFAULT;
@@ -4483,7 +4475,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
        /* check if minimum agaw is sufficient for mapped address */
        end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
        if (end < max_addr) {
-               printk(KERN_ERR "%s: iommu width (%d) is not "
+               pr_err("%s: iommu width (%d) is not "
                       "sufficient for the mapped address (%llx)\n",
                       __func__, dmar_domain->gaw, max_addr);
                return -EFAULT;
@@ -4624,7 +4616,7 @@ static const struct iommu_ops intel_iommu_ops = {
 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
 {
        /* G4x/GM45 integrated gfx dmar support is totally busted. */
-       printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+       pr_info("Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
 }
@@ -4642,7 +4634,7 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
         */
-       printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
+       pr_info("Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
 }
@@ -4672,11 +4664,11 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
                return;
        if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
-               printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
+               pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
                dmar_map_gfx = 0;
        } else if (dmar_map_gfx) {
                /* we have to ensure the gfx device is idle before we flush */
-               printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+               pr_info("Disabling batched IOTLB flush on Ironlake\n");
                intel_iommu_strict = 1;
        }
 }
@@ -4738,7 +4730,7 @@ static void __init check_tylersburg_isoch(void)
                iommu_identity_mapping |= IDENTMAP_AZALIA;
                return;
        }
-       printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+       pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
               vtisochctrl);
 }

--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1,3 +1,6 @@
+#define pr_fmt(fmt) "DMAR-IR: " fmt
+
 #include <linux/interrupt.h>
 #include <linux/dmar.h>
 #include <linux/spinlock.h>
@@ -100,8 +103,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        }
        if (mask > ecap_max_handle_mask(iommu->ecap)) {
-               printk(KERN_ERR
-                      "Requested mask %x exceeds the max invalidation handle"
+               pr_err("Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
@@ -333,7 +335,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
        up_read(&dmar_global_lock);
        if (sid == 0) {
-               pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
+               pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }
@@ -360,7 +362,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
        up_read(&dmar_global_lock);
        if (sid == 0) {
-               pr_warning("Failed to set source-id of HPET block (%d)\n", id);
+               pr_warn("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }
@@ -580,7 +582,7 @@ static void __init intel_cleanup_irq_remapping(void)
        }
        if (x2apic_supported())
                pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
 }
@@ -589,8 +591,7 @@ static int __init intel_prepare_irq_remapping(void)
        struct intel_iommu *iommu;
        if (irq_remap_broken) {
-               printk(KERN_WARNING
-                      "This system BIOS has enabled interrupt remapping\n"
+               pr_warn("This system BIOS has enabled interrupt remapping\n"
                       "on a chipset that contains an erratum making that\n"
                       "feature unstable. To maintain system stability\n"
                       "interrupt remapping is being disabled. Please\n"
@@ -606,7 +607,7 @@ static int __init intel_prepare_irq_remapping(void)
                return -ENODEV;
        if (parse_ioapics_under_ir() != 1) {
-               printk(KERN_INFO "Not enabling interrupt remapping\n");
+               pr_info("Not enabling interrupt remapping\n");
                goto error;
        }
@@ -667,8 +668,8 @@ static int __init intel_enable_irq_remapping(void)
         */
        for_each_iommu(iommu, drhd)
                if (eim && !ecap_eim_support(iommu->ecap)) {
-                       printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
+                       pr_info("DRHD %Lx: EIM not supported by DRHD, "
                                " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        eim = 0;
                }
        eim_mode = eim;
@@ -682,7 +683,7 @@ static int __init intel_enable_irq_remapping(void)
                int ret = dmar_enable_qi(iommu);
                if (ret) {
-                       printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
+                       pr_err("DRHD %Lx: failed to enable queued, "
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        goto error;
@@ -1145,14 +1146,12 @@ static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
        down_read(&dmar_global_lock);
        iommu = map_dev_to_ir(dev);
        if (!iommu) {
-               printk(KERN_ERR
-                      "Unable to map PCI %s to iommu\n", pci_name(dev));
+               pr_err("Unable to map PCI %s to iommu\n", pci_name(dev));
                index = -ENOENT;
        } else {
                index = alloc_irte(iommu, irq, nvec);
                if (index < 0) {
-                       printk(KERN_ERR
-                              "Unable to allocate %d IRTE for PCI %s\n",
+                       pr_err("Unable to allocate %d IRTE for PCI %s\n",
                               nvec, pci_name(dev));
                        index = -ENOSPC;
                }