
iommu/amd: Drop the lock while allocating new irq remap table

The irq_remap_table is allocated while the iommu_table_lock is held with
interrupts disabled.
From looking at the call sites, all callers are in the early device
initialisation path (apic_bsp_setup(), pci_enable_device(),
pci_enable_msi()), so it makes sense to drop the lock, which also enables
interrupts, and try to allocate that memory with GFP_KERNEL instead of
GFP_ATOMIC.

Since the iommu_table_lock is dropped during the allocation, we need to
recheck whether the table exists after the lock has been reacquired. I
*think* it is impossible for the "devid" entry to appear in
irq_lookup_table while the lock is dropped, since the same device can
only be probed once. However, I check for both cases, just to be sure.
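
The shape of the fix is the usual one for moving an allocation out of an
atomic section: drop the lock, allocate sleepably, retake the lock, and
recheck before publishing. A minimal sketch of that pattern, using
hypothetical names (struct my_table, lookup[], table_lock) rather than
the driver's actual identifiers:

static struct my_table *get_or_alloc_table(u16 devid)
{
	struct my_table *table, *new_table;
	unsigned long flags;

	spin_lock_irqsave(&table_lock, flags);
	table = lookup[devid];
	spin_unlock_irqrestore(&table_lock, flags);
	if (table)
		return table;

	/* Sleepable allocation; the lock is not held here. */
	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
	if (!new_table)
		return NULL;

	spin_lock_irqsave(&table_lock, flags);
	table = lookup[devid];
	if (table) {
		/* Lost the race: an entry was installed while the
		 * lock was dropped, so discard our private copy. */
		kfree(new_table);
	} else {
		table = lookup[devid] = new_table;
	}
	spin_unlock_irqrestore(&table_lock, flags);
	return table;
}

The recheck under the reacquired lock is what makes the unlocked window
safe: whoever loses the race frees its private allocation and uses the
table that was published first.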

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Sebastian Andrzej Siewior 2018-03-22 16:22:40 +01:00 committed by Joerg Roedel
parent 2fcc1e8ac4
commit 993ca6e063
1 changed file with 46 additions and 19 deletions

@@ -3617,6 +3617,30 @@ static struct irq_remap_table *get_irq_table(u16 devid)
 	return table;
 }
 
+static struct irq_remap_table *__alloc_irq_table(void)
+{
+	struct irq_remap_table *table;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
+	if (!table->table) {
+		kfree(table);
+		return NULL;
+	}
+	raw_spin_lock_init(&table->lock);
+
+	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+		memset(table->table, 0,
+		       MAX_IRQS_PER_TABLE * sizeof(u32));
+	else
+		memset(table->table, 0,
+		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	return table;
+}
+
 static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 				  struct irq_remap_table *table)
 {
@@ -3628,6 +3652,7 @@ static void set_remap_table_entry(struct amd_iommu *iommu, u16 devid,
 static struct irq_remap_table *alloc_irq_table(u16 devid)
 {
 	struct irq_remap_table *table = NULL;
+	struct irq_remap_table *new_table = NULL;
 	struct amd_iommu *iommu;
 	unsigned long flags;
 	u16 alias;
@@ -3646,42 +3671,44 @@ static struct irq_remap_table *alloc_irq_table(u16 devid)
 	table = irq_lookup_table[alias];
 	if (table) {
 		set_remap_table_entry(iommu, devid, table);
-		goto out;
+		goto out_wait;
 	}
+	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
 	/* Nothing there yet, allocate new irq remapping table */
-	table = kzalloc(sizeof(*table), GFP_ATOMIC);
-	if (!table)
+	new_table = __alloc_irq_table();
+	if (!new_table)
 		return NULL;
 
-	/* Initialize table spin-lock */
-	raw_spin_lock_init(&table->lock);
+	spin_lock_irqsave(&iommu_table_lock, flags);
 
-	table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
-	if (!table->table) {
-		kfree(table);
-		table = NULL;
-		goto out_unlock;
+	table = irq_lookup_table[devid];
+	if (table)
+		goto out_unlock;
+
+	table = irq_lookup_table[alias];
+	if (table) {
+		set_remap_table_entry(iommu, devid, table);
+		goto out_wait;
 	}
 
-	if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
-		memset(table->table, 0,
-		       MAX_IRQS_PER_TABLE * sizeof(u32));
-	else
-		memset(table->table, 0,
-		       (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
+	table = new_table;
+	new_table = NULL;
 
 	set_remap_table_entry(iommu, devid, table);
 	if (devid != alias)
 		set_remap_table_entry(iommu, alias, table);
 
-out:
+out_wait:
 	iommu_completion_wait(iommu);
 
 out_unlock:
 	spin_unlock_irqrestore(&iommu_table_lock, flags);
 
+	if (new_table) {
+		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
+		kfree(new_table);
+	}
 	return table;
 }
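
For reference, alloc_irq_table() after this patch reads roughly as
follows, reassembled from the hunks above. The opening of the function
(IOMMU and alias lookup plus the first irq_lookup_table[devid] check)
lies outside the diff context and is elided here:

static struct irq_remap_table *alloc_irq_table(u16 devid)
{
	struct irq_remap_table *table = NULL;
	struct irq_remap_table *new_table = NULL;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 alias;

	/* ... elided: take iommu_table_lock, look up iommu and alias,
	 *     return early if irq_lookup_table[devid] already exists ... */

	table = irq_lookup_table[alias];
	if (table) {
		set_remap_table_entry(iommu, devid, table);
		goto out_wait;
	}
	spin_unlock_irqrestore(&iommu_table_lock, flags);

	/* Nothing there yet, allocate new irq remapping table */
	new_table = __alloc_irq_table();
	if (!new_table)
		return NULL;

	spin_lock_irqsave(&iommu_table_lock, flags);

	table = irq_lookup_table[devid];
	if (table)
		goto out_unlock;

	table = irq_lookup_table[alias];
	if (table) {
		set_remap_table_entry(iommu, devid, table);
		goto out_wait;
	}

	table = new_table;
	new_table = NULL;

	set_remap_table_entry(iommu, devid, table);
	if (devid != alias)
		set_remap_table_entry(iommu, alias, table);

out_wait:
	iommu_completion_wait(iommu);

out_unlock:
	spin_unlock_irqrestore(&iommu_table_lock, flags);

	if (new_table) {
		kmem_cache_free(amd_iommu_irq_cache, new_table->table);
		kfree(new_table);
	}
	return table;
}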