sched, cpuacct: refactor cpuusage_read / cpuusage_write

Impact: micro-optimize the code on 64-bit architectures

In the thread regarding 'export percpu cpuacct cgroup stats'
http://lkml.org/lkml/2008/12/7/13

akpm pointed out that the current cpuacct code is inefficient.  This patch
refactors the following:

* take the cpu_rq lock only on 32-bit, where it is needed to keep 64-bit
  reads and writes of the per-cpu counter from tearing (see the sketch
  after this list)
* change the iterator to for_each_present_cpu() instead of
  for_each_possible_cpu() to make it hotplug friendly
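
For background on the locking point: on a 32-bit machine a u64 load or
store is performed as two 32-bit memory accesses, so an unlocked reader
can observe a half-updated ("torn") value, while an aligned 64-bit access
on a 64-bit machine is a single operation and needs no lock for this
pattern.  A minimal user-space sketch of the same idea (illustrative
only: counter_read() and the pthread mutex are stand-ins, not the
patch's code, and a pointer-width check substitutes for CONFIG_64BIT):

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t counter;		/* stands in for a percpu u64 */

uint64_t counter_read(void)
{
	uint64_t v;
#if UINTPTR_MAX == 0xffffffffu		/* 32-bit: two-word load, lock it */
	pthread_mutex_lock(&lock);	/* plays the role of rq->lock */
	v = counter;
	pthread_mutex_unlock(&lock);
#else					/* 64-bit: one aligned load */
	v = counter;
#endif
	return v;
}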

It's a bit of code churn, but I was rewarded with a 160-byte code size
saving on x86-64 and zero code size change on i386.
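
(To reproduce the measurement: one way, assuming the before/after object
files are kept, is the kernel's scripts/bloat-o-meter, e.g.
"./scripts/bloat-o-meter sched.o.old sched.o.new"; running plain
"size kernel/sched.o" on each build works too.)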

Signed-off-by: Ken Chen <kenchen@google.com>
Cc: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Ken Chen <kenchen@google.com>, 2008-12-15 22:02:01 -08:00
Committer: Ingo Molnar <mingo@elte.hu>
parent 34f28ecd0f
commit 720f54988e

@@ -9275,6 +9275,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	kfree(ca);
 }
 
+static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
+{
+	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 data;
+
+#ifndef CONFIG_64BIT
+	/*
+	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
+	 */
+	spin_lock_irq(&cpu_rq(cpu)->lock);
+	data = *cpuusage;
+	spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+	data = *cpuusage;
+#endif
+
+	return data;
+}
+
+static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
+{
+	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+
+#ifndef CONFIG_64BIT
+	/*
+	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
+	 */
+	spin_lock_irq(&cpu_rq(cpu)->lock);
+	*cpuusage = val;
+	spin_unlock_irq(&cpu_rq(cpu)->lock);
+#else
+	*cpuusage = val;
+#endif
+}
+
 /* return total cpu usage (in nanoseconds) of a group */
 static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
 {
@@ -9282,17 +9317,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
 	u64 totalcpuusage = 0;
 	int i;
 
-	for_each_possible_cpu(i) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
-
-		/*
-		 * Take rq->lock to make 64-bit addition safe on 32-bit
-		 * platforms.
-		 */
-		spin_lock_irq(&cpu_rq(i)->lock);
-		totalcpuusage += *cpuusage;
-		spin_unlock_irq(&cpu_rq(i)->lock);
-	}
+	for_each_present_cpu(i)
+		totalcpuusage += cpuacct_cpuusage_read(ca, i);
 
 	return totalcpuusage;
 }
@@ -9309,13 +9335,9 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
 		goto out;
 	}
 
-	for_each_possible_cpu(i) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
-
-		spin_lock_irq(&cpu_rq(i)->lock);
-		*cpuusage = 0;
-		spin_unlock_irq(&cpu_rq(i)->lock);
-	}
+	for_each_present_cpu(i)
+		cpuacct_cpuusage_write(ca, i, 0);
+
 out:
 	return err;
 }
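
For reference, the user-visible file behind these two paths is
cpuacct.usage: reading it returns the group's accumulated CPU time in
nanoseconds, and writing accepts only 0, which resets the per-cpu
counters (any other value is rejected before the loop above runs).  A
typical session, assuming the cpuacct controller is mounted at /cgroup:

	# mount -t cgroup -o cpuacct none /cgroup
	# cat /cgroup/cpuacct.usage
	# echo 0 > /cgroup/cpuacct.usage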