diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index c77206184b8b..76a8bc5f6265 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -117,6 +117,8 @@ int get_callchain_buffers(void)
 		err = alloc_callchain_buffers();
 exit:
 	mutex_unlock(&callchain_mutex);
+	if (err)
+		atomic_dec(&nr_callchain_events);
 
 	return err;
 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f35aa7e69e2d..3b998626b7a0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6457,7 +6457,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	struct pmu *pmu;
 	struct perf_event *event;
 	struct hw_perf_event *hwc;
-	long err;
+	long err = -EINVAL;
 
 	if ((unsigned)cpu >= nr_cpu_ids) {
 		if (!task || cpu != -1)
@@ -6540,25 +6540,23 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	 * we currently do not support PERF_FORMAT_GROUP on inherited events
 	 */
 	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
-		goto done;
+		goto err_ns;
 
 	pmu = perf_init_event(event);
-
-done:
-	err = 0;
 	if (!pmu)
-		err = -EINVAL;
-	else if (IS_ERR(pmu))
+		goto err_ns;
+	else if (IS_ERR(pmu)) {
 		err = PTR_ERR(pmu);
-
-	if (err) {
-		if (event->ns)
-			put_pid_ns(event->ns);
-		kfree(event);
-		return ERR_PTR(err);
+		goto err_ns;
 	}
 
 	if (!event->parent) {
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+			err = get_callchain_buffers();
+			if (err)
+				goto err_pmu;
+		}
+
 		if (event->attach_state & PERF_ATTACH_TASK)
 			static_key_slow_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
@@ -6573,16 +6571,19 @@ done:
 			atomic_inc(&per_cpu(perf_branch_stack_events,
 					    event->cpu));
 		}
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
-			err = get_callchain_buffers();
-			if (err) {
-				free_event(event);
-				return ERR_PTR(err);
-			}
-		}
 	}
 
 	return event;
+
+err_pmu:
+	if (event->destroy)
+		event->destroy(event);
+err_ns:
+	if (event->ns)
+		put_pid_ns(event->ns);
+	kfree(event);
+
+	return ERR_PTR(err);
 }
 
 static int perf_copy_attr(struct perf_event_attr __user *uattr,