
A set of fixes and improvements for the perf subsystem:

- Kernel fixes:
 
    - Install cgroup events to the correct CPU context to prevent a
      potential list double add
 
    - Prevent an integer underflow in the perf mlock accounting
 
    - Add a missing prototype for arch_perf_update_userpage()
 
- Tooling:
 
    - Add a missing unlock in the error path of maps__insert() in perf maps.
 
    - Fix the build with the latest libbfd
 
    - Fix the perf parser so it does not delete parse event terms, which
      caused a regression for using perf with the ARM CoreSight as the sink
      configuration was missing due to the deletion.
 
    - Fix the double free in the perf CPU map merging test case
 
    - Add the missing ustring support for the perf probe command
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAl5AC0ITHHRnbHhAbGlu
 dXRyb25peC5kZQAKCRCmGPVMDXSYoaJtD/4jEdN6KNGVJIQ5jOYdchXK/zb68plS
 3By6CegbaNq1SU5UPIdMX4BkznVGaVtJU/0hWuvD/ycpBTAMgKjwalYJtAC+anVi
 JhG7NiPRV1Nhm+7eZ/78mUpW4CUimTlvZVzU/yneYdFm2klvcxUHblJYSqEGp0AS
 r2aZRsqQnWSoI/+z+0THO8tI+HLSpkmKy2slLxaZphI0VjSrjWPDHfF6eAOyl/dq
 lTCz+tjd6EytELL+lhWFsGXYAi6HPKP3T4yPRH+eDYKQmByYaEYbK3E8wg/0XB/J
 2AHgSBf9pSPDBIkLOWOidmkmWgZD9ykCTyOPu4N0S70+NeaCm2nXLTOQ7dnyLE7t
 WCx8mvnIS2hshNUoXMkarG5LYexPupDMMEfHyUT5+T2rKxacKWLaRoIV+JCsUpQb
 m6eU3+n/YsN1C05V75Fuztt4irGhltlQxcG8F3gH/vqSy6VDdZb8lMU6+iyE2VKG
 ezsI7AMQkT6LrTGa2hXHHnnluaxHHSA32GPe4W1QTwMCMWMtRTwQHBBLoJ4mC0wk
 iujB9DVuh7ljmr7QSG9ZYV91eplpzJDUC54P6Qs/p7ouG4YzkIO6glt6BOgBmbp7
 YkrJtGpV6npjJmLckktcSd9rtnCzot6yGxeaIVfLPhhtf2KECSCckCyddwkakt0A
 wwVVBe8RNxXf2A==
 =xu7D
 -----END PGP SIGNATURE-----

Merge tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "A set of fixes and improvements for the perf subsystem:

  Kernel fixes:

   - Install cgroup events to the correct CPU context to prevent a
     potential list double add

   - Prevent an integer underflow in the perf mlock accounting

   - Add a missing prototype for arch_perf_update_userpage()

  Tooling:

   - Add a missing unlock in the error path of maps__insert() in perf
     maps.

   - Fix the build with the latest libbfd

   - Fix the perf parser so it does not delete parse event terms, which
     caused a regression for using perf with the ARM CoreSight as the
     sink configuration was missing due to the deletion.

   - Fix the double free in the perf CPU map merging test case

   - Add the missing ustring support for the perf probe command"

* tag 'perf-urgent-2020-02-09' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf maps: Add missing unlock to maps__insert() error case
  perf probe: Add ustring support for perf probe command
  perf: Make perf able to build with latest libbfd
  perf test: Fix test case Merge cpu map
  perf parse: Copy string to perf_evsel_config_term
  perf parse: Refactor 'struct perf_evsel_config_term'
  kernel/events: Add a missing prototype for arch_perf_update_userpage()
  perf/cgroups: Install cgroup events to correct cpuctx
  perf/core: Fix mlock accounting in perf_mmap()
Linus Torvalds 2020-02-09 12:04:09 -08:00
commit ca21b9b370
10 changed files with 88 additions and 36 deletions


@@ -1544,4 +1544,8 @@ int perf_event_exit_cpu(unsigned int cpu);
#define perf_event_exit_cpu NULL
#endif
+extern void __weak arch_perf_update_userpage(struct perf_event *event,
+struct perf_event_mmap_page *userpg,
+u64 now);
#endif /* _LINUX_PERF_EVENT_H */
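The declaration above pairs with a __weak no-op definition in kernel/events/core.c; an architecture overrides it by providing a strong definition with the same signature. A minimal illustrative sketch of such an override follows; only the prototype comes from this header, the body is invented:

#include <linux/perf_event.h>

/* Illustrative only: a strong definition like this (normally in arch code)
 * overrides the __weak default. The invented body just clears the
 * time-conversion capability bits in the mmap()ed user page. */
void arch_perf_update_userpage(struct perf_event *event,
                               struct perf_event_mmap_page *userpg, u64 now)
{
        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
}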


@@ -951,9 +951,9 @@ list_update_cgroup_event(struct perf_event *event,
/*
* Because cgroup events are always per-cpu events,
-* this will always be called from the right CPU.
+* @ctx == &cpuctx->ctx.
*/
-cpuctx = __get_cpu_context(ctx);
+cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
/*
* Since setting cpuctx->cgrp is conditional on the current @cgrp
@@ -979,7 +979,8 @@ list_update_cgroup_event(struct perf_event *event,
cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
if (add)
-list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+list_add(cpuctx_entry,
+per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
else
list_del(cpuctx_entry);
}
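What the change above boils down to: a cgroup event is bound to event->cpu, but this function may execute on a different CPU, so the per-CPU list must be chosen by the event's CPU rather than by whichever CPU happens to be running. A tiny sketch with an invented per-CPU variable (not perf's; list-head initialisation omitted):

#include <linux/percpu.h>
#include <linux/list.h>

/* Toy per-CPU list, names invented for illustration. */
static DEFINE_PER_CPU(struct list_head, demo_cpuctx_list);

static void demo_install(struct list_head *entry, int target_cpu)
{
        /* this_cpu_ptr(&demo_cpuctx_list) would pick the list of the CPU we
         * happen to be running on; per_cpu_ptr() selects the list of the CPU
         * the event is bound to, which is what the fix switches to. */
        list_add(entry, per_cpu_ptr(&demo_cpuctx_list, target_cpu));
}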
@@ -5916,7 +5917,15 @@ accounting:
*/
user_lock_limit *= num_online_cpus();
-user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+user_locked = atomic_long_read(&user->locked_vm);
+/*
+* sysctl_perf_event_mlock may have changed, so that
+* user->locked_vm > user_lock_limit
+*/
+if (user_locked > user_lock_limit)
+user_locked = user_lock_limit;
+user_locked += user_extra;
if (user_locked > user_lock_limit) {
/*
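The effect of the new clamp is easiest to see with concrete numbers. Below is a user-space toy model of the charging decision (all names invented, not the kernel's): without the clamp, lowering sysctl_perf_event_mlock after pages have already been charged makes the share pushed to pinned_vm exceed the newly requested pages, and the later uncharging can underflow the locked_vm counter.

#include <stdio.h>

/* Toy model: how many of the 'extra' newly requested pages are charged to
 * pinned_vm instead of the per-user perf mlock budget. */
static unsigned long pinned_share(unsigned long locked_vm,
                                  unsigned long lock_limit,
                                  unsigned long extra)
{
        unsigned long locked = locked_vm;

        if (locked > lock_limit)        /* the clamp added by the patch */
                locked = lock_limit;
        locked += extra;

        return locked > lock_limit ? locked - lock_limit : 0;
}

int main(void)
{
        /* 520 pages were charged while the limit was higher; it is now 512.
         * With the clamp this prints 16, never more than 'extra'; without it
         * the result would be 24. */
        printf("%lu\n", pinned_share(520, 512, 16));
        return 0;
}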


@@ -226,7 +226,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
continue;
-sink = term->val.drv_cfg;
+sink = term->val.str;
snprintf(path, PATH_MAX, "sinks/%s", sink);
ret = perf_pmu__scan_file(pmu, path, "%x", &hash);


@@ -131,7 +131,6 @@ int test__cpu_map_merge(struct test *test __maybe_unused, int subtest __maybe_un
TEST_ASSERT_VAL("failed to merge map: bad nr", c->nr == 5);
cpu_map__snprint(c, buf, sizeof(buf));
TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
-perf_cpu_map__put(a);
perf_cpu_map__put(b);
perf_cpu_map__put(c);
return 0;


@@ -808,12 +808,12 @@ static void apply_config_terms(struct evsel *evsel,
perf_evsel__reset_sample_bit(evsel, TIME);
break;
case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
-callgraph_buf = term->val.callgraph;
+callgraph_buf = term->val.str;
break;
case PERF_EVSEL__CONFIG_TERM_BRANCH:
-if (term->val.branch && strcmp(term->val.branch, "no")) {
+if (term->val.str && strcmp(term->val.str, "no")) {
perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
-parse_branch_str(term->val.branch,
+parse_branch_str(term->val.str,
&attr->branch_sample_type);
} else
perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
@@ -1265,6 +1265,8 @@ static void perf_evsel__free_config_terms(struct evsel *evsel)
list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
list_del_init(&term->list);
+if (term->free_str)
+zfree(&term->val.str);
free(term);
}
}


@@ -32,22 +32,21 @@ enum evsel_term_type {
struct perf_evsel_config_term {
struct list_head list;
enum evsel_term_type type;
+bool free_str;
union {
u64 period;
u64 freq;
bool time;
-char *callgraph;
-char *drv_cfg;
u64 stack_user;
int max_stack;
bool inherit;
bool overwrite;
-char *branch;
unsigned long max_events;
bool percore;
bool aux_output;
u32 aux_sample_size;
u64 cfg_chg;
+char *str;
} val;
bool weak;
};
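The refactor above collapses the per-type string members (callgraph, drv_cfg, branch) into a single str member guarded by a free_str ownership flag, so cleanup code can tell string-valued terms apart from numeric ones. A minimal user-space illustration of the pattern, with invented names:

#include <stdlib.h>

struct demo_term {
        int type;
        int owns_str;           /* like free_str: set when val.str was strdup()ed */
        union {
                long num;       /* numeric terms share the union ...           */
                char *str;      /* ... with one string member for all of them  */
        } val;
};

static void demo_term_free(struct demo_term *t)
{
        if (t->owns_str)        /* only string-valued terms own heap memory */
                free(t->val.str);
        free(t);
}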


@@ -549,6 +549,7 @@ void maps__insert(struct maps *maps, struct map *map)
if (maps_by_name == NULL) {
__maps__free_maps_by_name(maps);
+up_write(&maps->lock);
return;
}


@@ -1219,8 +1219,7 @@ static int config_attr(struct perf_event_attr *attr,
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused)
{
-#define ADD_CONFIG_TERM(__type, __name, __val) \
-do { \
+#define ADD_CONFIG_TERM(__type) \
struct perf_evsel_config_term *__t; \
\
__t = zalloc(sizeof(*__t)); \
@@ -1229,9 +1228,24 @@ do { \
\
INIT_LIST_HEAD(&__t->list); \
__t->type = PERF_EVSEL__CONFIG_TERM_ ## __type; \
-__t->val.__name = __val; \
__t->weak = term->weak; \
-list_add_tail(&__t->list, head_terms); \
+list_add_tail(&__t->list, head_terms)
+#define ADD_CONFIG_TERM_VAL(__type, __name, __val) \
+do { \
+ADD_CONFIG_TERM(__type); \
+__t->val.__name = __val; \
+} while (0)
+#define ADD_CONFIG_TERM_STR(__type, __val) \
+do { \
+ADD_CONFIG_TERM(__type); \
+__t->val.str = strdup(__val); \
+if (!__t->val.str) { \
+zfree(&__t); \
+return -ENOMEM; \
+} \
+__t->free_str = true; \
+} while (0)
struct parse_events_term *term;
@@ -1239,53 +1253,62 @@ do { \
list_for_each_entry(term, head_config, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
-ADD_CONFIG_TERM(PERIOD, period, term->val.num);
+ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num);
break;
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
-ADD_CONFIG_TERM(FREQ, freq, term->val.num);
+ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num);
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
-ADD_CONFIG_TERM(TIME, time, term->val.num);
+ADD_CONFIG_TERM_VAL(TIME, time, term->val.num);
break;
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
-ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str);
+ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str);
break;
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
-ADD_CONFIG_TERM(BRANCH, branch, term->val.str);
+ADD_CONFIG_TERM_STR(BRANCH, term->val.str);
break;
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
-ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
+ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
+term->val.num);
break;
case PARSE_EVENTS__TERM_TYPE_INHERIT:
-ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 1 : 0);
+ADD_CONFIG_TERM_VAL(INHERIT, inherit,
+term->val.num ? 1 : 0);
break;
case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
-ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
+ADD_CONFIG_TERM_VAL(INHERIT, inherit,
+term->val.num ? 0 : 1);
break;
case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
-ADD_CONFIG_TERM(MAX_STACK, max_stack, term->val.num);
+ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
+term->val.num);
break;
case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
-ADD_CONFIG_TERM(MAX_EVENTS, max_events, term->val.num);
+ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
+term->val.num);
break;
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
-ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 1 : 0);
+ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
+term->val.num ? 1 : 0);
break;
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
-ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 0 : 1);
+ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
+term->val.num ? 0 : 1);
break;
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
-ADD_CONFIG_TERM(DRV_CFG, drv_cfg, term->val.str);
+ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str);
break;
case PARSE_EVENTS__TERM_TYPE_PERCORE:
-ADD_CONFIG_TERM(PERCORE, percore,
-term->val.num ? true : false);
+ADD_CONFIG_TERM_VAL(PERCORE, percore,
+term->val.num ? true : false);
break;
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
-ADD_CONFIG_TERM(AUX_OUTPUT, aux_output, term->val.num ? 1 : 0);
+ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
+term->val.num ? 1 : 0);
break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
-ADD_CONFIG_TERM(AUX_SAMPLE_SIZE, aux_sample_size, term->val.num);
+ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
+term->val.num);
break;
default:
break;
@@ -1322,7 +1345,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
}
if (bits)
-ADD_CONFIG_TERM(CFG_CHG, cfg_chg, bits);
+ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits);
#undef ADD_CONFIG_TERM
return 0;


@@ -303,7 +303,8 @@ static int convert_variable_type(Dwarf_Die *vr_die,
char prefix;
/* TODO: check all types */
-if (cast && strcmp(cast, "string") != 0 && strcmp(cast, "x") != 0 &&
+if (cast && strcmp(cast, "string") != 0 && strcmp(cast, "ustring") &&
+strcmp(cast, "x") != 0 &&
strcmp(cast, "s") != 0 && strcmp(cast, "u") != 0) {
/* Non string type is OK */
/* and respect signedness/hexadecimal cast */


@@ -193,16 +193,30 @@ static void find_address_in_section(bfd *abfd, asection *section, void *data)
bfd_vma pc, vma;
bfd_size_type size;
struct a2l_data *a2l = data;
+flagword flags;
if (a2l->found)
return;
-if ((bfd_get_section_flags(abfd, section) & SEC_ALLOC) == 0)
+#ifdef bfd_get_section_flags
+flags = bfd_get_section_flags(abfd, section);
+#else
+flags = bfd_section_flags(section);
+#endif
+if ((flags & SEC_ALLOC) == 0)
return;
pc = a2l->addr;
+#ifdef bfd_get_section_vma
vma = bfd_get_section_vma(abfd, section);
+#else
+vma = bfd_section_vma(section);
+#endif
+#ifdef bfd_get_section_size
size = bfd_get_section_size(section);
+#else
+size = bfd_section_size(section);
+#endif
if (pc < vma || pc >= vma + size)
return;
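The #ifdef guards above are needed because binutils 2.34 removed the old two-argument bfd_get_section_* accessor macros in favour of bfd_section_*(). An alternative approach, not what this patch does, would be a single compatibility shim defined once; a sketch under that assumption:

#include <bfd.h>

/* Hypothetical shim, not part of the patch: on binutils >= 2.34 the old
 * accessor macros are absent, so map them to the new single-argument forms;
 * on older binutils the #ifndef leaves the existing macros untouched. */
#ifndef bfd_get_section_flags
#define bfd_get_section_flags(abfd, sec)  bfd_section_flags(sec)
#endif
#ifndef bfd_get_section_vma
#define bfd_get_section_vma(abfd, sec)    bfd_section_vma(sec)
#endif
#ifndef bfd_get_section_size
#define bfd_get_section_size(sec)         bfd_section_size(sec)
#endif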