
Merge branch 'slab/urgent' into slab/next

Pekka Enberg 2011-11-27 22:08:03 +02:00
commit 42616cacf8
1 changed file with 26 additions and 16 deletions

mm/slub.c

@@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
 {
 	struct kmem_cache_node *n = NULL;
 	struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
-	struct page *page;
+	struct page *page, *discard_page = NULL;
 
 	while ((page = c->partial)) {
 		enum slab_modes { M_PARTIAL, M_FREE };
@@ -1904,7 +1904,8 @@ static void unfreeze_partials(struct kmem_cache *s)
 			if (l == M_PARTIAL)
 				remove_partial(n, page);
 			else
-				add_partial(n, page, 1);
+				add_partial(n, page,
+					DEACTIVATE_TO_TAIL);
 
 			l = m;
 		}
@@ -1915,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
 					"unfreezing slab"));
 
 		if (m == M_FREE) {
-			stat(s, DEACTIVATE_EMPTY);
-			discard_slab(s, page);
-			stat(s, FREE_SLAB);
+			page->next = discard_page;
+			discard_page = page;
 		}
 	}
 
 	if (n)
 		spin_unlock(&n->list_lock);
+
+	while (discard_page) {
+		page = discard_page;
+		discard_page = discard_page->next;
+
+		stat(s, DEACTIVATE_EMPTY);
+		discard_slab(s, page);
+		stat(s, FREE_SLAB);
+	}
 }
 
 /*
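The unfreeze_partials() hunks above make two changes: the partial-list requeue now passes the named DEACTIVATE_TO_TAIL constant instead of a bare 1, and slabs found completely empty are no longer discarded while the node's list_lock is held; they are chained through page->next onto a local discard_page list and only freed after spin_unlock(), presumably to shorten the locked section. Below is a minimal userspace C sketch of that deferred-free pattern, not the kernel code itself; every name in it (struct item, struct node, reap_empty, ...) is illustrative.

/*
 * Deferred-free sketch: while the lock is held, empty items are only
 * unlinked and chained onto a private list; the actual free happens
 * after the lock has been dropped, as in the patch above.
 */
#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int in_use;
};

struct node {
	pthread_mutex_t lock;
	struct item *partial;		/* protected by lock */
};

static void reap_empty(struct node *n)
{
	struct item *it, **pp, *discard = NULL;

	pthread_mutex_lock(&n->lock);
	for (pp = &n->partial; (it = *pp) != NULL; ) {
		if (it->in_use == 0) {
			*pp = it->next;		/* unlink under the lock */
			it->next = discard;	/* defer the free */
			discard = it;
		} else {
			pp = &it->next;
		}
	}
	pthread_mutex_unlock(&n->lock);

	/* free outside the critical section */
	while (discard) {
		it = discard;
		discard = discard->next;
		free(it);
	}
}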
@@ -1969,7 +1978,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
 	stat(s, CPU_PARTIAL_FREE);
 	return pobjects;
 }
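The put_cpu_partial() hunk swaps this_cpu_cmpxchg() for irqsafe_cpu_cmpxchg() in the loop that pushes a page onto the per-CPU partial list; the loop structure itself (link the new page in front of the current head, then publish it only if the head is still unchanged, otherwise retry) is unchanged. The following is a sketch of that compare-and-swap push pattern using plain C11 atomics; partial_head, page_stub and push_partial are invented for the example and are not kernel APIs.

/*
 * Lockless push onto a singly linked list: prepare the new head,
 * then commit it only if no one else changed the head in the meantime.
 */
#include <stdatomic.h>

struct page_stub {
	struct page_stub *next;
	int pobjects;
};

static _Atomic(struct page_stub *) partial_head;

static void push_partial(struct page_stub *page)
{
	struct page_stub *oldpage;

	do {
		oldpage = atomic_load(&partial_head);
		page->next = oldpage;	/* link ahead of the current head */
	} while (!atomic_compare_exchange_weak(&partial_head,
					       &oldpage, page));
}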
@@ -4435,30 +4444,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			int node = ACCESS_ONCE(c->node);
 			struct page *page;
 
-			if (!c || c->node < 0)
+			if (node < 0)
 				continue;
 
-			if (c->page) {
+			page = ACCESS_ONCE(c->page);
+			if (page) {
 				if (flags & SO_TOTAL)
-					x = c->page->objects;
+					x = page->objects;
 				else if (flags & SO_OBJECTS)
-					x = c->page->inuse;
+					x = page->inuse;
 				else
 					x = 1;
 
 				total += x;
-				nodes[c->node] += x;
+				nodes[node] += x;
 			}
 			page = c->partial;
 
 			if (page) {
 				x = page->pobjects;
 				total += x;
-				nodes[c->node] += x;
+				nodes[node] += x;
 			}
-			per_cpu[c->node]++;
+			per_cpu[node]++;
 		}
 	}
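In show_slab_objects(), c->node and c->page were previously re-read on every use, so a concurrent allocation could change them between the check and the array index or dereference; the patch snapshots both with ACCESS_ONCE() into locals and then uses only the locals. The fragment below sketches that read-once pattern with the volatile cast that ACCESS_ONCE() boils down to; struct cpu_slab, page_stub and count_objects are made up for illustration.

/*
 * Read each shared field exactly once, then check and use the same
 * snapshot, so the value cannot change between check and use.
 */
struct page_stub {
	int objects;
};

struct cpu_slab {
	int node;			/* may be updated by another CPU */
	struct page_stub *page;		/* likewise */
};

static void count_objects(struct cpu_slab *c, unsigned long *nodes)
{
	int node = *(volatile int *)&c->node;
	struct page_stub *page = *(struct page_stub * volatile *)&c->page;

	if (node < 0)			/* checked on the snapshot ... */
		return;

	if (page)
		nodes[node] += page->objects;	/* ... and used on the same snapshot */
}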