
perf hists: Move locking to its call-sites

This is a preparation patch for eliminating unneeded locking in the
perf report path: the hists lock is now taken by the callers of
add_hist_entry() instead of inside it.
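
To illustrate the pattern (a minimal, hedged sketch: the struct layout
and function names below are made-up stand-ins for struct hists and the
real perf entry points, not the actual perf code): the helper stops
locking and relies on its callers, so a caller that serializes by other
means can later skip the mutex entirely.

/*
 * Sketch only: hists__add_entry(), add_entry_threaded() and the
 * struct below are illustrative stand-ins, not perf APIs.
 */
#include <pthread.h>
#include <stdio.h>

struct hists {
	pthread_mutex_t lock;
	long nr_entries;
};

/* The helper no longer locks; it assumes the caller serializes access. */
static long hists__add_entry(struct hists *hists)
{
	return ++hists->nr_entries;
}

/* A caller that can race with other threads takes the lock around the
 * call, the way process_sample_event() does after this patch... */
static long add_entry_threaded(struct hists *hists)
{
	long n;

	pthread_mutex_lock(&hists->lock);
	n = hists__add_entry(hists);
	pthread_mutex_unlock(&hists->lock);
	return n;
}

int main(void)
{
	struct hists hists = { .nr_entries = 0 };

	pthread_mutex_init(&hists.lock, NULL);

	/* ...while a single-threaded caller (the eventual goal for the
	 * perf report path) could skip the lock entirely. */
	add_entry_threaded(&hists);
	hists__add_entry(&hists);

	printf("%ld entries\n", hists.nr_entries);
	return 0;
}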

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1368497347-9628-5-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Namhyung Kim 2013-05-14 11:09:02 +09:00 committed by Arnaldo Carvalho de Melo
parent 3a5714f8b5
commit 27a0dcb7ad
3 changed files with 18 additions and 17 deletions

tools/perf/builtin-report.c

@@ -297,6 +297,7 @@ static int process_sample_event(struct perf_tool *tool,
 {
 	struct perf_report *rep = container_of(tool, struct perf_report, tool);
 	struct addr_location al;
+	int ret;
 
 	if (perf_event__preprocess_sample(event, machine, &al, sample,
 					  rep->annotate_init) < 0) {
@@ -311,28 +312,29 @@ static int process_sample_event(struct perf_tool *tool,
 	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
 		return 0;
 
+	pthread_mutex_lock(&evsel->hists.lock);
+
 	if (sort__mode == SORT_MODE__BRANCH) {
-		if (perf_report__add_branch_hist_entry(tool, &al, sample,
-						       evsel, machine)) {
+		ret = perf_report__add_branch_hist_entry(tool, &al, sample,
+							 evsel, machine);
+		if (ret < 0)
 			pr_debug("problem adding lbr entry, skipping event\n");
-			return -1;
-		}
 	} else if (rep->mem_mode == 1) {
-		if (perf_report__add_mem_hist_entry(tool, &al, sample,
-						    evsel, machine, event)) {
+		ret = perf_report__add_mem_hist_entry(tool, &al, sample,
+						      evsel, machine, event);
+		if (ret < 0)
 			pr_debug("problem adding mem entry, skipping event\n");
-			return -1;
-		}
 	} else {
 		if (al.map != NULL)
 			al.map->dso->hit = 1;
 
-		if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) {
+		ret = perf_evsel__add_hist_entry(evsel, &al, sample, machine);
+		if (ret < 0)
 			pr_debug("problem incrementing symbol period, skipping event\n");
-			return -1;
-		}
 	}
-	return 0;
+	pthread_mutex_unlock(&evsel->hists.lock);
+
+	return ret;
 }
 
 static int process_read_event(struct perf_tool *tool,

tools/perf/builtin-top.c

@@ -245,8 +245,11 @@ static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
 {
 	struct hist_entry *he;
 
+	pthread_mutex_lock(&evsel->hists.lock);
 	he = __hists__add_entry(&evsel->hists, al, NULL, sample->period,
 				sample->weight);
+	pthread_mutex_unlock(&evsel->hists.lock);
+
 	if (he == NULL)
 		return NULL;
 

tools/perf/util/hist.c

@@ -347,8 +347,6 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 	struct hist_entry *he;
 	int cmp;
 
-	pthread_mutex_lock(&hists->lock);
-
 	p = &hists->entries_in->rb_node;
 
 	while (*p != NULL) {
@@ -394,14 +392,12 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 
 	he = hist_entry__new(entry);
 	if (!he)
-		goto out_unlock;
+		return NULL;
 
 	rb_link_node(&he->rb_node_in, parent, p);
 	rb_insert_color(&he->rb_node_in, hists->entries_in);
 out:
 	hist_entry__add_cpumode_period(he, al->cpumode, period);
-out_unlock:
-	pthread_mutex_unlock(&hists->lock);
 	return he;
 }
 