
perf_counter: Introduce struct for sample data

For easy extension of the sample data, put it in a structure.
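(Illustration, not part of the patch: once the sample data travels in a struct, extending it means adding a member and setting it where it is known; call sites that do not care stay untouched. The period member and the report_overflow() helper below are hypothetical.)

	struct perf_sample_data {
		struct pt_regs	*regs;
		u64		addr;
		u64		period;		/* hypothetical later addition */
	};

	/* Designated initializers zero the members a call site does not set,
	 * so adding 'period' above changes no existing caller. */
	static void report_overflow(struct perf_counter *counter, int nmi,
				    struct pt_regs *regs)
	{
		struct perf_sample_data data = {
			.regs = regs,
			.addr = 0,
		};

		perf_counter_overflow(counter, nmi, &data);
	}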

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra 2009-06-10 21:02:22 +02:00 committed by Ingo Molnar
parent ea1900e571
commit df1a132bf3
4 changed files with 48 additions and 25 deletions

arch/powerpc/kernel/perf_counter.c

@@ -1001,7 +1001,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		addr = 0;
+		struct perf_sample_data data = {
+			.regs = regs,
+			.addr = 0,
+		};
+
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
 			/*
 			 * The user wants a data address recorded.
@@ -1016,9 +1020,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
 				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
 			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
-				addr = mfspr(SPRN_SDAR);
+				data.addr = mfspr(SPRN_SDAR);
 		}
-		if (perf_counter_overflow(counter, nmi, regs, addr)) {
+		if (perf_counter_overflow(counter, nmi, &data)) {
 			/*
 			 * Interrupts are coming too fast - throttle them
 			 * by setting the counter to 0, so it will be

arch/x86/kernel/cpu/perf_counter.c

@@ -1173,11 +1173,14 @@ static void intel_pmu_reset(void)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
 	int bit, cpu, loops;
 	u64 ack, status;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
@@ -1210,7 +1213,7 @@ again:
 		if (!intel_pmu_save_and_restart(counter))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			intel_pmu_disable_counter(&counter->hw, bit);
 	}
@@ -1230,12 +1233,16 @@ again:
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-	int cpu, idx, handled = 0;
+	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
+	int cpu, idx, handled = 0;
 	u64 val;
 
+	data.regs = regs;
+	data.addr = 0;
+
 	cpu = smp_processor_id();
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
@@ -1256,7 +1263,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
-		if (perf_counter_overflow(counter, 1, regs, 0))
+		if (perf_counter_overflow(counter, 1, &data))
 			amd_pmu_disable_counter(hwc, idx);
 	}

include/linux/perf_counter.h

@@ -605,8 +605,14 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 		struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern int perf_counter_overflow(struct perf_counter *counter,
-				 int nmi, struct pt_regs *regs, u64 addr);
+struct perf_sample_data {
+	struct pt_regs	*regs;
+	u64		addr;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+				 struct perf_sample_data *data);
+
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */

kernel/perf_counter.c

@@ -2378,8 +2378,8 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter,
-				int nmi, struct pt_regs *regs, u64 addr)
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+				struct perf_sample_data *data)
 {
 	int ret;
 	u64 sample_type = counter->attr.sample_type;
@@ -2404,10 +2404,10 @@ static void perf_counter_output(struct perf_counter *counter,
 	header.size = sizeof(header);
 
 	header.misc = PERF_EVENT_MISC_OVERFLOW;
-	header.misc |= perf_misc_flags(regs);
+	header.misc |= perf_misc_flags(data->regs);
 
 	if (sample_type & PERF_SAMPLE_IP) {
-		ip = perf_instruction_pointer(regs);
+		ip = perf_instruction_pointer(data->regs);
 		header.type |= PERF_SAMPLE_IP;
 		header.size += sizeof(ip);
 	}
@@ -2460,7 +2460,7 @@ static void perf_counter_output(struct perf_counter *counter,
 	}
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-		callchain = perf_callchain(regs);
+		callchain = perf_callchain(data->regs);
 
 		if (callchain) {
 			callchain_size = (1 + callchain->nr) * sizeof(u64);
@@ -2486,7 +2486,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		perf_output_put(&handle, time);
 
 	if (sample_type & PERF_SAMPLE_ADDR)
-		perf_output_put(&handle, addr);
+		perf_output_put(&handle, data->addr);
 
 	if (sample_type & PERF_SAMPLE_ID)
 		perf_output_put(&handle, counter->id);
@@ -2950,8 +2950,8 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  * Generic counter overflow handling.
  */
 
-int perf_counter_overflow(struct perf_counter *counter,
-			  int nmi, struct pt_regs *regs, u64 addr)
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+			  struct perf_sample_data *data)
 {
 	int events = atomic_read(&counter->event_limit);
 	int throttle = counter->pmu->unthrottle != NULL;
@@ -3005,7 +3005,7 @@ int perf_counter_overflow(struct perf_counter *counter,
 		perf_counter_disable(counter);
 	}
 
-	perf_counter_output(counter, nmi, regs, addr);
+	perf_counter_output(counter, nmi, data);
 	return ret;
 }
@@ -3054,24 +3054,25 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
 	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
 	struct perf_counter *counter;
-	struct pt_regs *regs;
 	u64 period;
 
 	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
 	counter->pmu->read(counter);
 
-	regs = get_irq_regs();
+	data.addr = 0;
+	data.regs = get_irq_regs();
 	/*
 	 * In case we exclude kernel IPs or are somehow not in interrupt
 	 * context, provide the next best thing, the user IP.
 	 */
-	if ((counter->attr.exclude_kernel || !regs) &&
+	if ((counter->attr.exclude_kernel || !data.regs) &&
 			!counter->attr.exclude_user)
-		regs = task_pt_regs(current);
+		data.regs = task_pt_regs(current);
 
-	if (regs) {
-		if (perf_counter_overflow(counter, 0, regs, 0))
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
 			ret = HRTIMER_NORESTART;
 	}
@@ -3084,9 +3085,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs, u64 addr)
 {
+	struct perf_sample_data data = {
+		.regs	= regs,
+		.addr	= addr,
+	};
+
 	perf_swcounter_update(counter);
 	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, regs, addr))
+	if (perf_counter_overflow(counter, nmi, &data))
 		/* soft-disable the counter */
 		;
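(Summary sketch of the conversion the hunks above apply at every call site, assuming regs and addr are in scope as before:)

	/* old calling convention */
	ret = perf_counter_overflow(counter, nmi, regs, addr);

	/* new calling convention */
	struct perf_sample_data data = {
		.regs	= regs,
		.addr	= addr,
	};

	ret = perf_counter_overflow(counter, nmi, &data);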