
vmstat: use this_cpu() to avoid irqon/off sequence in refresh_cpu_vm_stats

Repeatedly disabling and re-enabling interrupts in the inner loop can be
avoided by using a this_cpu operation.
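
Condensed from the hunks below, the before/after pattern of the inner loop is:

        /* Before: interrupts had to be disabled around each read-and-clear. */
        unsigned long flags;
        int v;

        local_irq_save(flags);
        v = p->vm_stat_diff[i];
        p->vm_stat_diff[i] = 0;
        local_irq_restore(flags);
        atomic_long_add(v, &zone->vm_stat[i]);

        /*
         * After: this_cpu_xchg() reads and zeroes the per-cpu slot in a single
         * per-cpu operation that is already safe against interrupts on the
         * local CPU, so no explicit irq off/on is needed per counter.
         */
        v = this_cpu_xchg(p->vm_stat_diff[i], 0);
        if (v)
                atomic_long_add(v, &zone->vm_stat[i]);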

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Christoph Lameter on 2013-09-11 14:21:32 -07:00; committed by Linus Torvalds
parent 4edb0748b2
commit fbc2edb053
1 changed file with 16 additions and 19 deletions

mm/vmstat.c

@@ -437,33 +437,29 @@ static inline void fold_diff(int *diff)
  * with the global counters. These could cause remote node cache line
  * bouncing and will have to be only done when necessary.
  */
-static void refresh_cpu_vm_stats(int cpu)
+static void refresh_cpu_vm_stats(void)
 {
         struct zone *zone;
         int i;
         int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 
         for_each_populated_zone(zone) {
-                struct per_cpu_pageset *p;
+                struct per_cpu_pageset __percpu *p = zone->pageset;
 
-                p = per_cpu_ptr(zone->pageset, cpu);
+                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
+                        int v;
 
-                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-                        if (p->vm_stat_diff[i]) {
-                                unsigned long flags;
-                                int v;
-
-                                local_irq_save(flags);
-                                v = p->vm_stat_diff[i];
-                                p->vm_stat_diff[i] = 0;
-                                local_irq_restore(flags);
+                        v = this_cpu_xchg(p->vm_stat_diff[i], 0);
+                        if (v) {
+
                                 atomic_long_add(v, &zone->vm_stat[i]);
                                 global_diff[i] += v;
 #ifdef CONFIG_NUMA
                                 /* 3 seconds idle till flush */
-                                p->expire = 3;
+                                __this_cpu_write(p->expire, 3);
 #endif
                         }
+                }
                 cond_resched();
 #ifdef CONFIG_NUMA
                 /*
@@ -473,23 +469,24 @@ static void refresh_cpu_vm_stats(int cpu)
                  * Check if there are pages remaining in this pageset
                  * if not then there is nothing to expire.
                  */
-                if (!p->expire || !p->pcp.count)
+                if (!__this_cpu_read(p->expire) ||
+                    !__this_cpu_read(p->pcp.count))
                         continue;
 
                 /*
                  * We never drain zones local to this processor.
                  */
                 if (zone_to_nid(zone) == numa_node_id()) {
-                        p->expire = 0;
+                        __this_cpu_write(p->expire, 0);
                         continue;
                 }
 
-                p->expire--;
-                if (p->expire)
+
+                if (__this_cpu_dec_return(p->expire))
                         continue;
 
-                if (p->pcp.count)
-                        drain_zone_pages(zone, &p->pcp);
+                if (__this_cpu_read(p->pcp.count))
+                        drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
 #endif
         }
         fold_diff(global_diff);
@@ -1216,7 +1213,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 
 static void vmstat_update(struct work_struct *w)
 {
-        refresh_cpu_vm_stats(smp_processor_id());
+        refresh_cpu_vm_stats();
         schedule_delayed_work(&__get_cpu_var(vmstat_work),
                 round_jiffies_relative(sysctl_stat_interval));
 }