
drm/ttm: add support for different pool sizes

Correctly handle different page sizes in the memory accounting.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Authored by Christian König on 2017-07-04 16:56:24 +02:00, committed by Alex Deucher
parent f9ebec52b5
commit d188bfa553
4 changed files with 14 additions and 12 deletions
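
In short, the per-page accounting helpers now charge the size the caller actually allocated instead of always charging PAGE_SIZE. Below is a minimal user-space sketch of that idea, not kernel code; mem_acct and the acct_alloc_page_* helpers are hypothetical stand-ins for struct ttm_mem_global and ttm_mem_global_alloc_page().

/*
 * Minimal user-space sketch (not kernel code): the accounting helper
 * now takes the size backing each page instead of assuming PAGE_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

struct mem_acct {
	uint64_t used;	/* bytes currently charged */
};

/* old behaviour: every page was charged exactly PAGE_SIZE */
static void acct_alloc_page_old(struct mem_acct *acct)
{
	acct->used += PAGE_SIZE;
}

/* new behaviour: the caller passes the real backing size */
static void acct_alloc_page_new(struct mem_acct *acct, uint64_t size)
{
	acct->used += size;
}

int main(void)
{
	struct mem_acct before = {0}, after = {0};
	uint64_t pool_size = 16 * PAGE_SIZE;	/* e.g. a larger pool entry */

	acct_alloc_page_old(&before);		/* undercounts the allocation */
	acct_alloc_page_new(&after, pool_size);	/* matches what was handed out */

	printf("old: %llu bytes, new: %llu bytes\n",
	       (unsigned long long)before.used,
	       (unsigned long long)after.used);
	return 0;
}

In the diff itself, ttm_pool_populate() keeps passing PAGE_SIZE, while the DMA pool paths pass pool->size, so pools whose entries are not exactly one page can be accounted correctly.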

drivers/gpu/drm/ttm/ttm_memory.c

@@ -546,7 +546,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 EXPORT_SYMBOL(ttm_mem_global_alloc);
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-			      struct page *page)
+			      struct page *page, uint64_t size)
 {
 	struct ttm_mem_zone *zone = NULL;
@@ -563,10 +563,11 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
 		zone = glob->zone_kernel;
 #endif
-	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, false, false);
+	return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
 }
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
+			      uint64_t size)
 {
 	struct ttm_mem_zone *zone = NULL;
@@ -577,10 +578,9 @@ void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
 	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
 		zone = glob->zone_kernel;
 #endif
-	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+	ttm_mem_global_free_zone(glob, zone, size);
 }
 size_t ttm_round_pot(size_t size)
 {
 	if ((size & (size - 1)) == 0)

drivers/gpu/drm/ttm/ttm_page_alloc.c

@@ -882,7 +882,8 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 			return -ENOMEM;
 		}
-		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]);
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						PAGE_SIZE);
 		if (unlikely(ret != 0)) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
@@ -909,7 +910,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; ++i) {
 		if (ttm->pages[i]) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i]);
+						 ttm->pages[i], PAGE_SIZE);
 			ttm_put_pages(&ttm->pages[i], 1,
 				      ttm->page_flags,
 				      ttm->caching_state);

drivers/gpu/drm/ttm/ttm_page_alloc_dma.c

@@ -902,7 +902,8 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			return -ENOMEM;
 		}
-		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i]);
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						pool->size);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
@@ -967,13 +968,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	if (is_cached) {
 		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 d_page->p);
+						 d_page->p, pool->size);
 			ttm_dma_page_put(pool, d_page);
 		}
 	} else {
 		for (i = 0; i < count; i++) {
 			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i]);
+						 ttm->pages[i], pool->size);
 		}
 	}

include/drm/ttm/ttm_memory.h

@@ -150,9 +150,9 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 extern void ttm_mem_global_free(struct ttm_mem_global *glob,
 				uint64_t amount);
 extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-				     struct page *page);
+				     struct page *page, uint64_t size);
 extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-				     struct page *page);
+				     struct page *page, uint64_t size);
 extern size_t ttm_round_pot(size_t size);
 extern uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob);
 #endif