
drm/ttm: improve uncached page deallocation.

Calls to set_memory_wb() incur a heavy TLB flush and IPI cost. To
minimize those, wait until the pool grows beyond the batch size
before draining it.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Reviewed-and-Tested-by: Michel Dänzer <michel@daenzer.net>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Jérôme Glisse 2015-07-09 14:19:30 -04:00 committed by Dave Airlie
parent ef2b731759
commit e930888487
1 changed file with 6 additions and 6 deletions


@@ -963,13 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	} else {
 		pool->npages_free += count;
 		list_splice(&ttm_dma->pages_list, &pool->free_list);
-		if (pool->npages_free > _manager->options.max_size) {
+		/*
+		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
+		 * to free in order to minimize calls to set_memory_wb().
+		 */
+		if (pool->npages_free >= (_manager->options.max_size +
+					  NUM_PAGES_TO_ALLOC))
 			npages = pool->npages_free - _manager->options.max_size;
-			/* free at least NUM_PAGES_TO_ALLOC number of pages
-			 * to reduce calls to set_memory_wb */
-			if (npages < NUM_PAGES_TO_ALLOC)
-				npages = NUM_PAGES_TO_ALLOC;
-		}
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
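
For illustration, here is a minimal user-space sketch of the batching
pattern the patch applies: the pool is allowed to overshoot its soft cap
by a full batch before any pages are released, so the expensive
write-back flush runs once per batch instead of on every small overflow.
POOL_MAX_SIZE, pool_put() and drain_pages() are made-up stand-ins for
this sketch, not kernel symbols; only NUM_PAGES_TO_ALLOC mirrors a name
from the patch.

	#include <stdio.h>

	#define POOL_MAX_SIZE      64  /* soft cap, stand-in for options.max_size */
	#define NUM_PAGES_TO_ALLOC 16  /* batch size, mirrors the kernel constant */

	static unsigned int npages_free;  /* pages currently parked in the pool */

	/* Stand-in for set_memory_wb() plus the actual page release. */
	static void drain_pages(unsigned int npages)
	{
		printf("flush + free %u pages (one TLB-flush/IPI round)\n", npages);
		npages_free -= npages;
	}

	/* Called when 'count' pages are returned to the pool. */
	static void pool_put(unsigned int count)
	{
		npages_free += count;

		/*
		 * Same condition as the patch: drain only once the pool has
		 * grown a full batch past its cap, then trim back to the cap.
		 */
		if (npages_free >= POOL_MAX_SIZE + NUM_PAGES_TO_ALLOC)
			drain_pages(npages_free - POOL_MAX_SIZE);
	}

	int main(void)
	{
		for (int i = 0; i < 30; i++)
			pool_put(4);  /* many small returns, few drains */
		printf("pages left in pool: %u\n", npages_free);
		return 0;
	}

With the numbers above, 30 returns of 4 pages trigger only three flush
rounds; without the hysteresis, every return past the cap would pay for
a flush of its own.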