
perf/core: Add return value for perf_event_read()

When we implement the ability to read several counters at once (using
the PERF_PMU_TXN_READ transaction interface), perf_event_read() can
fail when the 'group' parameter is true (eg: trying to read too many
events at once).

For now, have perf_event_read() return an integer. Ignore the return
value when the 'group' parameter is false.
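
For example, under the new convention a group read must check the
return value, while a single-event read can discard it with a (void)
cast. A minimal caller-side sketch (illustrative only, not itself part
of the diff below; it mirrors the patterns this patch applies in
__perf_read_group_add() and _perf_event_reset()):

	int ret;

	/* Group read: the PMU-level read may fail, so propagate the error. */
	ret = perf_event_read(leader, true);
	if (ret)
		return ret;

	/* Single-event read: ignore the return value, as 'group' is false. */
	(void)perf_event_read(event, false);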

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1441336073-22750-8-git-send-email-sukadev@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Sukadev Bhattiprolu 2015-09-03 20:07:50 -07:00 committed by Ingo Molnar
parent fa8c269353
commit 7d88962e23
1 changed file with 34 additions and 11 deletions

kernel/events/core.c

@@ -3187,6 +3187,7 @@ void perf_event_exec(void)
 struct perf_read_data {
 	struct perf_event *event;
 	bool group;
+	int ret;
 };
 
 /*
@@ -3227,6 +3228,7 @@ static void __perf_event_read(void *info)
 		if (sub->state == PERF_EVENT_STATE_ACTIVE)
 			sub->pmu->read(sub);
 	}
+	data->ret = 0;
 
 unlock:
 	raw_spin_unlock(&ctx->lock);
@@ -3293,8 +3295,10 @@ u64 perf_event_read_local(struct perf_event *event)
 	return val;
 }
 
-static void perf_event_read(struct perf_event *event, bool group)
+static int perf_event_read(struct perf_event *event, bool group)
 {
+	int ret = 0;
+
 	/*
 	 * If event is enabled and currently active on a CPU, update the
 	 * value in the event structure:
@@ -3303,9 +3307,11 @@ static void perf_event_read(struct perf_event *event, bool group)
 		struct perf_read_data data = {
 			.event = event,
 			.group = group,
+			.ret = 0,
 		};
 		smp_call_function_single(event->oncpu,
 					 __perf_event_read, &data, 1);
+		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
@@ -3326,6 +3332,8 @@ static void perf_event_read(struct perf_event *event, bool group)
 			update_event_times(event);
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	}
+
+	return ret;
 }
 
 /*
@@ -3842,7 +3850,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 
 	mutex_lock(&event->child_mutex);
 
-	perf_event_read(event, false);
+	(void)perf_event_read(event, false);
 	total += perf_event_count(event);
 
 	*enabled += event->total_time_enabled +
@@ -3851,7 +3859,7 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 		     atomic64_read(&event->child_total_time_running);
 
 	list_for_each_entry(child, &event->child_list, child_list) {
-		perf_event_read(child, false);
+		(void)perf_event_read(child, false);
 		total += perf_event_count(child);
 		*enabled += child->total_time_enabled;
 		*running += child->total_time_running;
@@ -3862,13 +3870,16 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 }
 EXPORT_SYMBOL_GPL(perf_event_read_value);
 
-static void __perf_read_group_add(struct perf_event *leader,
+static int __perf_read_group_add(struct perf_event *leader,
 					u64 read_format, u64 *values)
 {
 	struct perf_event *sub;
 	int n = 1; /* skip @nr */
+	int ret;
 
-	perf_event_read(leader, true);
+	ret = perf_event_read(leader, true);
+	if (ret)
+		return ret;
 
 	/*
 	 * Since we co-schedule groups, {enabled,running} times of siblings
@@ -3897,6 +3908,8 @@ static void __perf_read_group_add(struct perf_event *leader,
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
 	}
+
+	return 0;
 }
 
 static int perf_read_group(struct perf_event *event,
@@ -3904,7 +3917,7 @@ static int perf_read_group(struct perf_event *event,
 {
 	struct perf_event *leader = event->group_leader, *child;
 	struct perf_event_context *ctx = leader->ctx;
-	int ret = event->read_size;
+	int ret;
 	u64 *values;
 
 	lockdep_assert_held(&ctx->mutex);
@@ -3921,17 +3934,27 @@ static int perf_read_group(struct perf_event *event,
 	 */
 	mutex_lock(&leader->child_mutex);
 
-	__perf_read_group_add(leader, read_format, values);
-	list_for_each_entry(child, &leader->child_list, child_list)
-		__perf_read_group_add(child, read_format, values);
+	ret = __perf_read_group_add(leader, read_format, values);
+	if (ret)
+		goto unlock;
+
+	list_for_each_entry(child, &leader->child_list, child_list) {
+		ret = __perf_read_group_add(child, read_format, values);
+		if (ret)
+			goto unlock;
+	}
 
 	mutex_unlock(&leader->child_mutex);
 
+	ret = event->read_size;
 	if (copy_to_user(buf, values, event->read_size))
 		ret = -EFAULT;
+	goto out;
 
+unlock:
+	mutex_unlock(&leader->child_mutex);
+out:
 	kfree(values);
-
 	return ret;
 }
 
@@ -4037,7 +4060,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void _perf_event_reset(struct perf_event *event)
 {
-	perf_event_read(event, false);
+	(void)perf_event_read(event, false);
 	local64_set(&event->count, 0);
 	perf_event_update_userpage(event);
 }