perf tools: Use perf_cpu_map__nr instead of cpu_map__nr

Switch the rest of the perf code to use libperf's perf_cpu_map__nr(), which
is equivalent to the current cpu_map__nr(), and remove the cpu_map__nr()
function.

Link: http://lkml.kernel.org/n/tip-6e0guy75clis7nm0xpuz9fga@git.kernel.org
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190822111141.25823-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Jiri Olsa 2019-08-22 13:11:38 +02:00 committed by Arnaldo Carvalho de Melo
parent db9a5fd02a
commit 6549cd8f2c
5 changed files with 9 additions and 14 deletions
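For context, the conversion is a straight rename at the call sites: libperf's perf_cpu_map__nr() has the same semantics as the removed helper (a NULL map counts as one dummy CPU), as the commit message above notes. Below is a minimal, hypothetical sketch of a standalone caller using the in-tree libperf of this era (perf/cpumap.h, int-returning perf_cpu_map__cpu()); it is not part of the patch.

/*
 * Minimal sketch, not part of the patch: counting and walking CPUs with
 * libperf's perf_cpu_map API, which callers use once cpu_map__nr() is gone.
 * Assumes the in-tree libperf of this era, where perf_cpu_map__cpu() returns
 * a plain int.
 */
#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	/* NULL cpu list means "all online CPUs" */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
	int idx, nr;

	if (!cpus)
		return 1;

	nr = perf_cpu_map__nr(cpus);	/* was cpu_map__nr(cpus) */
	for (idx = 0; idx < nr; idx++)
		printf("index %d -> cpu %d\n", idx, perf_cpu_map__cpu(cpus, idx));

	perf_cpu_map__put(cpus);
	return 0;
}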

@@ -653,7 +653,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 		cpu_map = online_cpus;
 	} else {
 		/* Make sure all specified CPUs are online */
-		for (i = 0; i < cpu_map__nr(event_cpus); i++) {
+		for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
 			if (cpu_map__has(event_cpus, i) &&
 			    !cpu_map__has(online_cpus, i))
 				return -EINVAL;
@@ -662,7 +662,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 		cpu_map = event_cpus;
 	}
-	nr_cpu = cpu_map__nr(cpu_map);
+	nr_cpu = perf_cpu_map__nr(cpu_map);
 	/* Get PMU type as dynamically assigned by the core */
 	type = cs_etm_pmu->type;

@@ -49,11 +49,6 @@ static inline int cpu_map__id_to_cpu(int id)
 	return id & 0xffff;
 }
-static inline int cpu_map__nr(const struct perf_cpu_map *map)
-{
-	return map ? map->nr : 1;
-}
 static inline bool cpu_map__empty(const struct perf_cpu_map *map)
 {
 	return map ? map->map[0] == -1 : true;

@@ -370,7 +370,7 @@ static int perf_evlist__enable_event_thread(struct evlist *evlist,
 					    int thread)
 {
 	int cpu;
-	int nr_cpus = cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 	if (!evsel->core.fd)
 		return -EINVAL;
@@ -396,7 +396,7 @@ int perf_evlist__enable_event_idx(struct evlist *evlist,
 int perf_evlist__alloc_pollfd(struct evlist *evlist)
 {
-	int nr_cpus = cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 	int nr_threads = thread_map__nr(evlist->core.threads);
 	int nfds = 0;
 	struct evsel *evsel;
@@ -692,7 +692,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist,
 	int i;
 	struct perf_mmap *map;
-	evlist->nr_mmaps = cpu_map__nr(evlist->core.cpus);
+	evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
 	if (cpu_map__empty(evlist->core.cpus))
 		evlist->nr_mmaps = thread_map__nr(evlist->core.threads);
 	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
@@ -807,7 +807,7 @@ static int perf_evlist__mmap_per_cpu(struct evlist *evlist,
 				     struct mmap_params *mp)
 {
 	int cpu, thread;
-	int nr_cpus = cpu_map__nr(evlist->core.cpus);
+	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 	int nr_threads = thread_map__nr(evlist->core.threads);
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
@@ -1014,7 +1014,7 @@ int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
 	evlist__for_each_entry(evlist, evsel) {
 		if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
 		    evsel->sample_id == NULL &&
-		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
+		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
 			return -ENOMEM;
 	}

@@ -331,7 +331,7 @@ static void build_node_mask(int node, cpu_set_t *mask)
 	if (!cpu_map)
 		return;
-	nr_cpus = cpu_map__nr(cpu_map);
+	nr_cpus = perf_cpu_map__nr(cpu_map);
 	for (c = 0; c < nr_cpus; c++) {
 		cpu = cpu_map->map[c]; /* map c index to online cpu index */
 		if (cpu__get_node(cpu) == node)

@@ -745,7 +745,7 @@ static void print_aggr_thread(struct perf_stat_config *config,
 {
 	FILE *output = config->output;
 	int nthreads = thread_map__nr(counter->core.threads);
-	int ncpus = cpu_map__nr(counter->core.cpus);
+	int ncpus = perf_cpu_map__nr(counter->core.cpus);
 	int thread, sorted_threads, id;
 	struct perf_aggr_thread_value *buf;