
drm/ttm: Add sysfs interface to control pool allocator.

The sysfs interface allows the user to configure the pool allocator and to
change the limits on the pool size.

Signed-off-by: Pauli Nieminen <suokkos@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Pauli Nieminen 2010-04-01 12:45:03 +00:00 committed by Dave Airlie
parent 975efdb1bf
commit c96af79e34
3 changed files with 114 additions and 3 deletions
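
To illustrate how the new attributes are meant to be used, here is a minimal userspace sketch that reads and updates one of them. The sysfs path is an assumption for illustration only; the real location depends on where the ttm_mem_global kobject (the parent of the "pool" kobject added by this patch) is registered on a given kernel. Values are in kB, matching ttm_pool_show()/ttm_pool_store() below.

/*
 * Minimal userspace sketch of reading and updating one of the new pool
 * attributes.  The path is an assumed example location, not part of the
 * patch; values are in kB as exposed by ttm_pool_show()/ttm_pool_store().
 */
#include <stdio.h>

#define POOL_MAX_ATTR "/sys/kernel/mm/ttm/pool/pool_max_size"	/* assumed path */

int main(void)
{
	FILE *f;
	unsigned int kb;

	f = fopen(POOL_MAX_ATTR, "r");
	if (!f) {
		perror(POOL_MAX_ATTR);
		return 1;
	}
	if (fscanf(f, "%u", &kb) == 1)
		printf("current pool_max_size: %u kB\n", kb);
	fclose(f);

	f = fopen(POOL_MAX_ATTR, "w");
	if (!f) {
		perror(POOL_MAX_ATTR);
		return 1;
	}
	fprintf(f, "%u\n", 65536);	/* cap the pool at 64 MiB worth of pages */
	fclose(f);
	return 0;
}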


@@ -393,7 +393,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
"Zone %7s: Available graphics memory: %llu kiB.\n",
zone->name, (unsigned long long) zone->max_mem >> 10);
}
- ttm_page_alloc_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
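
A worked example of the cap now passed to ttm_page_alloc_init(), using illustrative numbers (a 512 MiB kernel zone and 4 KiB pages, both assumptions for the sake of the arithmetic):

/*
 * Worked example: max_mem / (2 * PAGE_SIZE) is the page cap handed to
 * ttm_page_alloc_init().  With the assumed numbers below this is 65536.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long max_mem = 512ULL << 20;	/* assumed zone->max_mem */
	unsigned long long page_size = 4096;		/* assumed PAGE_SIZE */

	printf("max_pages = %llu\n", max_mem / (2 * page_size));	/* prints 65536 */
	return 0;
}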


@@ -72,6 +72,12 @@ struct ttm_page_pool {
unsigned long nrefills;
};
/**
* Limits for the pool. They are handled without locks because the only place
* where they may change is the sysfs store. They won't have an immediate effect
* anyway, so forcing serialization to access them is pointless.
*/
struct ttm_pool_opts {
unsigned alloc_size;
unsigned max_size;
@@ -94,6 +100,7 @@ struct ttm_pool_opts {
* @pools: All pool objects in use.
**/
struct ttm_pool_manager {
struct kobject kobj;
struct shrinker mm_shrink;
atomic_t page_alloc_inited;
struct ttm_pool_opts options;
@@ -109,6 +116,100 @@ struct ttm_pool_manager {
};
};
static struct attribute ttm_page_pool_max = {
.name = "pool_max_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
.name = "pool_small_allocation",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
.name = "pool_allocation_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute *ttm_pool_attrs[] = {
&ttm_page_pool_max,
&ttm_page_pool_small,
&ttm_page_pool_alloc_size,
NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
(void)m;
}
static ssize_t ttm_pool_store(struct kobject *kobj,
struct attribute *attr, const char *buffer, size_t size)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
int chars;
unsigned val;
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
/* Convert kB to number of pages */
val = val / (PAGE_SIZE >> 10);
if (attr == &ttm_page_pool_max)
m->options.max_size = val;
else if (attr == &ttm_page_pool_small)
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
printk(KERN_ERR "[ttm] Setting allocation size to %lu "
"is not allowed. Recomended size is "
"%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
printk(KERN_WARNING "[ttm] Setting allocation size to "
"larger than %lu is not recomended.\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
return size;
}
static ssize_t ttm_pool_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
unsigned val = 0;
if (attr == &ttm_page_pool_max)
val = m->options.max_size;
else if (attr == &ttm_page_pool_small)
val = m->options.small;
else if (attr == &ttm_page_pool_alloc_size)
val = m->options.alloc_size;
val = val * (PAGE_SIZE >> 10);
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
.show = &ttm_pool_show,
.store = &ttm_pool_store,
};
static struct kobj_type ttm_pool_kobj_type = {
.release = &ttm_pool_kobj_release,
.sysfs_ops = &ttm_pool_sysfs_ops,
.default_attrs = ttm_pool_attrs,
};
static struct ttm_pool_manager _manager = {
.page_alloc_inited = ATOMIC_INIT(0)
};
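
The ttm_pool_store()/ttm_pool_show() pair above keeps the options in pages internally but exposes them in kB. A small sketch of that round trip, assuming 4 KiB pages (so PAGE_SIZE >> 10 == 4); the numbers are illustrative:

/*
 * Round trip of the sysfs unit conversion used by ttm_pool_store() and
 * ttm_pool_show(), assuming PAGE_SIZE == 4096.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned kb = 16384;				/* value written via sysfs, in kB */
	unsigned pages = kb / (PAGE_SIZE >> 10);	/* store path: 16384 / 4 = 4096 pages */
	unsigned back = pages * (PAGE_SIZE >> 10);	/* show path: 4096 * 4 = 16384 kB */

	printf("%u kB -> %u pages -> %u kB\n", kb, pages, back);
	return 0;
}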
@@ -669,8 +770,9 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
pool->name = name;
}
- int ttm_page_alloc_init(unsigned max_pages)
+ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
int ret;
if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
return 0;
@@ -690,6 +792,13 @@ int ttm_page_alloc_init(unsigned max_pages)
_manager.options.small = SMALL_ALLOCATION;
_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
if (unlikely(ret != 0)) {
kobject_put(&_manager.kobj);
return ret;
}
ttm_pool_mm_shrink_init(&_manager);
return 0;
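
Side note on the registration above: the kobject_init()/kobject_add() pair could equivalently be written with the kernel's combined kobject_init_and_add() helper; either way, a failed add leaves the caller holding a reference that must be dropped with kobject_put(), which is what ends up calling ttm_pool_kobj_release(). A sketch of the equivalent call, not part of the patch:

/* Equivalent registration sketch (not part of the patch): initialise and
 * add in one call; on failure the caller still owns the reference and
 * must drop it with kobject_put(). */
ret = kobject_init_and_add(&_manager.kobj, &ttm_pool_kobj_type,
			   &glob->kobj, "pool");
if (unlikely(ret != 0)) {
	kobject_put(&_manager.kobj);
	return ret;
}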
@@ -707,6 +816,8 @@ void ttm_page_alloc_fini()
for (i = 0; i < NUM_POOLS; ++i)
ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
kobject_put(&_manager.kobj);
}
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)


@@ -61,7 +61,7 @@ void ttm_put_pages(struct list_head *pages,
* multiple times, but ttm_page_alloc_fini has to be called the same number of
* times.
*/
- int ttm_page_alloc_init(unsigned max_pages);
+ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
/**
* Free pool allocator.
*/
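
The comment on ttm_page_alloc_init above documents the refcount-like contract enforced through page_alloc_inited: init may be called several times, but each successful call must eventually be balanced by a ttm_page_alloc_fini(). A hypothetical caller (the function name, include paths and the page cap are illustrative, not part of the patch) might look like this:

/* Hypothetical caller (illustrative only): every successful init is
 * balanced by one fini call.  Include paths are assumed. */
#include "ttm/ttm_memory.h"
#include "ttm/ttm_page_alloc.h"

static int example_enable_pool(struct ttm_mem_global *glob)
{
	int ret = ttm_page_alloc_init(glob, 4096);	/* illustrative 4096-page cap */

	if (ret)
		return ret;
	/* ... allocate and free pages through the pool ... */
	ttm_page_alloc_fini();
	return 0;
}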