1
0
Fork 0

iommu/amd: Implement timeout to flush unmap queues

In case the queue doesn't fill up, we flush the TLB at least
10ms after the unmap happened to make sure that the TLB is
cleaned up.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
hifive-unleashed-5.1
Joerg Roedel 2016-07-06 13:56:36 +02:00
parent b1516a1465
commit bb279475db
1 changed file with 28 additions and 0 deletions

View File

@ -105,6 +105,9 @@ struct flush_queue {
/* Per-CPU queues of deferred unmap ranges awaiting an IOTLB flush */
DEFINE_PER_CPU(struct flush_queue, flush_queue);
/* Non-zero while queue_timer is pending; guards against re-arming it on every unmap */
static atomic_t queue_timer_on;
/* Fires queue_flush_timeout() ~10ms after the first deferred unmap is queued */
static struct timer_list queue_timer;
/*
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
@ -2151,6 +2154,24 @@ static void __queue_flush(struct flush_queue *queue)
queue->next = 0;
}
/*
 * Timer callback: flush every per-cpu unmap queue that still holds
 * pending entries, so stale IOTLB entries are invalidated even when a
 * queue never fills up on its own.
 *
 * The argument is the timer's opaque data word (set to 0 in
 * amd_iommu_init_dma_ops()) and is intentionally unused.
 */
void queue_flush_timeout(unsigned long unused)
{
	int cpu;

	/* Clear first so queue_add() can re-arm the timer for later unmaps */
	atomic_set(&queue_timer_on, 0);

	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;
		unsigned long flags;

		queue = per_cpu_ptr(&flush_queue, cpu);

		/* Lock out concurrent queue_add() on this CPU's queue */
		spin_lock_irqsave(&queue->lock, flags);
		if (queue->next > 0)
			__queue_flush(queue);
		spin_unlock_irqrestore(&queue->lock, flags);
	}
}
static void queue_add(struct dma_ops_domain *dma_dom,
unsigned long address, unsigned long pages)
{
@ -2176,6 +2197,10 @@ static void queue_add(struct dma_ops_domain *dma_dom,
entry->dma_dom = dma_dom;
spin_unlock_irqrestore(&queue->lock, flags);
if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
put_cpu_ptr(&flush_queue);
}
@ -2634,6 +2659,9 @@ out_put_iova:
int __init amd_iommu_init_dma_ops(void)
{
setup_timer(&queue_timer, queue_flush_timeout, 0);
atomic_set(&queue_timer_on, 0);
swiotlb = iommu_pass_through ? 1 : 0;
iommu_detected = 1;