
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:
 "Kernel side changes:

   - AMD range breakpoints support:

     Extend the breakpoint tools and core to support address ranges
     through perf events, with initial backend support for AMD extended
     breakpoints.

     The syntax is:

         perf record -e mem:addr/len:type

     For example, to set a write breakpoint from 0x1000 to 0x1200 (0x1000 + 512):

         perf record -e mem:0x1000/512:w

   - event throttling/rotating fixes

   - various event group handling fixes, cleanups and general paranoia
     code to be more robust against bugs in the future.

   - kernel stack overhead fixes

  User-visible tooling side changes:

   - Show the precise number of samples at the end of a 'record' session,
     if processing build ids, since we will then traverse the whole
     perf.data file and see all the PERF_RECORD_SAMPLE records,
     otherwise stop showing the previous off-base heuristically counted
     number of "samples" (Namhyung Kim).

   - Support reading compressed kernel modules from the build-id cache
     (Namhyung Kim)

   - Enable sampling loads and stores simultaneously in 'perf mem'
     (Stephane Eranian)

   - 'perf diff' output improvements (Namhyung Kim)

   - Fix error reporting for evsel pgfault constructor (Arnaldo Carvalho
     de Melo)

  Tooling side infrastructure changes:

   - Cache eh/debug frame offset for dwarf unwind (Namhyung Kim)

   - Support parsing parameterized events (Cody P Schafer)

   - Add support for IP address formats in libtraceevent (David Ahern)

  Plus other misc fixes"
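
A minimal user-space sketch of the new interface (illustrative address, no
error handling; assumes a kernel with this series running on an AMD CPU with
BPEXT). As in the arch_build_bp_info() change further below, a bp_len other
than 1/2/4/8 must be a power of two and is converted into an address mask of
bp_len - 1:

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Watch writes to [0x1000, 0x1000 + 512) in the current task. */
    static int open_range_breakpoint(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size    = sizeof(attr);
            attr.type    = PERF_TYPE_BREAKPOINT;
            attr.bp_type = HW_BREAKPOINT_W;  /* write accesses */
            attr.bp_addr = 0x1000;           /* start address, aligned to the mask */
            attr.bp_len  = 512;              /* power of 2; kernel derives mask = 511 */

            /* pid = 0 (self), cpu = -1 (any), no group leader, no flags */
            return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    }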

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
  perf: Decouple unthrottling and rotating
  perf: Drop module reference on event init failure
  perf: Use POLLIN instead of POLL_IN for perf poll data in flag
  perf: Fix put_event() ctx lock
  perf: Fix move_group() order
  perf: Fix event->ctx locking
  perf: Add a bit of paranoia
  perf symbols: Convert lseek + read to pread
  perf tools: Use perf_data_file__fd() consistently
  perf symbols: Support to read compressed module from build-id cache
  perf evsel: Set attr.task bit for a tracking event
  perf header: Set header version correctly
  perf record: Show precise number of samples
  perf tools: Do not use __perf_session__process_events() directly
  perf callchain: Cache eh/debug frame offset for dwarf unwind
  perf tools: Provide stub for missing pthread_attr_setaffinity_np
  perf evsel: Don't rely on malloc working for sz 0
  tools lib traceevent: Add support for IP address formats
  perf ui/tui: Show fatal error message only if exists
  perf tests: Fix typo in sample-parsing.c
  ...
Linus Torvalds 2015-02-09 15:43:55 -08:00
commit a4cbbf549a
76 changed files with 1620 additions and 675 deletions


@ -52,12 +52,18 @@ Description: Per-pmu performance monitoring events specific to the running syste
event=0x2abc
event=0x423,inv,cmask=0x3
domain=0x1,offset=0x8,starting_index=0xffff
domain=0x1,offset=0x8,core=?
Each of the assignments indicates a value to be assigned to a
particular set of bits (as defined by the format file
corresponding to the <term>) in the perf_event structure passed
to the perf_open syscall.
In the case of the last example, a value replacing "?" would
need to be provided by the user selecting the particular event.
This is referred to as "event parameterization". Event
parameters have the format 'param=?'.
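
For example (mirroring the perf-list documentation change further
below), the '?' is filled in on the command line:

    perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
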
What: /sys/bus/event_source/devices/<pmu>/events/<event>.unit
Date: 2014/02/24
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>


@ -174,6 +174,7 @@
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
/*
@ -388,6 +389,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
#if __GNUC__ >= 4
extern void warn_pre_alternatives(void);


@ -114,5 +114,10 @@ static inline void debug_stack_usage_inc(void) { }
static inline void debug_stack_usage_dec(void) { }
#endif /* X86_64 */
#ifdef CONFIG_CPU_SUP_AMD
extern void set_dr_addr_mask(unsigned long mask, int dr);
#else
static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
#endif
#endif /* _ASM_X86_DEBUGREG_H */


@ -12,6 +12,7 @@
*/
struct arch_hw_breakpoint {
unsigned long address;
unsigned long mask;
u8 len;
u8 type;
};


@ -251,6 +251,10 @@
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
#define MSR_F16H_DR1_ADDR_MASK 0xc0011019
#define MSR_F16H_DR2_ADDR_MASK 0xc001101a
#define MSR_F16H_DR3_ADDR_MASK 0xc001101b
#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
/* Fam 15h MSRs */
#define MSR_F15H_PERF_CTL 0xc0010200


@ -869,3 +869,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
return false;
}
void set_dr_addr_mask(unsigned long mask, int dr)
{
if (!cpu_has_bpext)
return;
switch (dr) {
case 0:
wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
break;
case 1:
case 2:
case 3:
wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
break;
default:
break;
}
}


@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
*dr7 |= encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
if (info->mask)
set_dr_addr_mask(info->mask, i);
return 0;
}
@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
*dr7 &= ~__encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
}
static int get_hbp_len(u8 hbp_len)
{
unsigned int len_in_bytes = 0;
switch (hbp_len) {
case X86_BREAKPOINT_LEN_1:
len_in_bytes = 1;
break;
case X86_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
case X86_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
#endif
}
return len_in_bytes;
if (info->mask)
set_dr_addr_mask(0, i);
}
/*
@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
va = info->address;
len = get_hbp_len(info->len);
len = bp->attr.bp_len;
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp)
}
/* Len */
info->mask = 0;
switch (bp->attr.bp_len) {
case HW_BREAKPOINT_LEN_1:
info->len = X86_BREAKPOINT_LEN_1;
@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp)
break;
#endif
default:
return -EINVAL;
if (!is_power_of_2(bp->attr.bp_len))
return -EINVAL;
if (!cpu_has_bpext)
return -EOPNOTSUPP;
info->mask = bp->attr.bp_len - 1;
info->len = X86_BREAKPOINT_LEN_1;
}
return 0;
}
/*
* Validate the arch-specific HW Breakpoint register settings
*/
@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (ret)
return ret;
ret = -EINVAL;
switch (info->len) {
case X86_BREAKPOINT_LEN_1:
align = 0;
if (info->mask)
align = info->mask;
break;
case X86_BREAKPOINT_LEN_2:
align = 1;
@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
break;
#endif
default:
return ret;
WARN_ON_ONCE(1);
}
/*


@ -595,7 +595,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
extern void *perf_trace_buf_prepare(int size, unsigned short type,
struct pt_regs *regs, int *rctxp);
struct pt_regs **regs, int *rctxp);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,


@ -469,6 +469,7 @@ struct perf_event_context {
*/
struct mutex mutex;
struct list_head active_ctx_list;
struct list_head pinned_groups;
struct list_head flexible_groups;
struct list_head event_list;
@ -519,7 +520,6 @@ struct perf_cpu_context {
int exclusive;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
struct list_head rotation_list;
struct pmu *unique_pmu;
struct perf_cgroup *cgrp;
};
@ -659,6 +659,7 @@ static inline int is_software_event(struct perf_event *event)
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs
@ -683,14 +684,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct pt_regs hot_regs;
if (static_key_false(&perf_swevent_enabled[event_id])) {
if (!regs) {
perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs;
}
if (static_key_false(&perf_swevent_enabled[event_id]))
__perf_sw_event(event_id, nr, regs, addr);
}
DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
/*
* 'Special' version for the scheduler, it hard assumes no recursion,
* which is guaranteed by us not actually scheduling inside other swevents
* because those disable preemption.
*/
static __always_inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
if (static_key_false(&perf_swevent_enabled[event_id])) {
struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
perf_fetch_caller_regs(regs);
___perf_sw_event(event_id, nr, regs, addr);
}
}
@ -706,7 +718,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
static inline void perf_event_task_sched_out(struct task_struct *prev,
struct task_struct *next)
{
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
if (static_key_false(&perf_sched_events.key))
__perf_event_task_sched_out(prev, next);
@ -817,6 +829,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
static inline int perf_register_guest_info_callbacks


@ -763,7 +763,7 @@ perf_trace_##call(void *__data, proto) \
struct ftrace_event_call *event_call = __data; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_raw_##call *entry; \
struct pt_regs __regs; \
struct pt_regs *__regs; \
u64 __addr = 0, __count = 1; \
struct task_struct *__task = NULL; \
struct hlist_head *head; \
@ -782,18 +782,19 @@ perf_trace_##call(void *__data, proto) \
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
perf_fetch_caller_regs(&__regs); \
entry = perf_trace_buf_prepare(__entry_size, \
event_call->event.type, &__regs, &rctx); \
if (!entry) \
return; \
\
perf_fetch_caller_regs(__regs); \
\
tstruct \
\
{ assign; } \
\
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, &__regs, head, __task); \
__count, __regs, head, __task); \
}
/*


@ -872,22 +872,32 @@ void perf_pmu_enable(struct pmu *pmu)
pmu->pmu_enable(pmu);
}
static DEFINE_PER_CPU(struct list_head, rotation_list);
static DEFINE_PER_CPU(struct list_head, active_ctx_list);
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
* perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
* perf_event_task_tick() are fully serialized because they're strictly cpu
* affine and perf_event_ctx{activate,deactivate} are called with IRQs
* disabled, while perf_event_task_tick is called from IRQ context.
*/
static void perf_pmu_rotate_start(struct pmu *pmu)
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct list_head *head = this_cpu_ptr(&rotation_list);
struct list_head *head = this_cpu_ptr(&active_ctx_list);
WARN_ON(!irqs_disabled());
if (list_empty(&cpuctx->rotation_list))
list_add(&cpuctx->rotation_list, head);
WARN_ON(!list_empty(&ctx->active_ctx_list));
list_add(&ctx->active_ctx_list, head);
}
static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
WARN_ON(!irqs_disabled());
WARN_ON(list_empty(&ctx->active_ctx_list));
list_del_init(&ctx->active_ctx_list);
}
static void get_ctx(struct perf_event_context *ctx)
@ -906,6 +916,84 @@ static void put_ctx(struct perf_event_context *ctx)
}
}
/*
* Because of perf_event::ctx migration in sys_perf_event_open::move_group and
* perf_pmu_migrate_context() we need some magic.
*
* Those places that change perf_event::ctx will hold both
* perf_event_ctx::mutex of the 'old' and 'new' ctx value.
*
* Lock ordering is by mutex address. There is one other site where
* perf_event_context::mutex nests and that is put_event(). But remember that
* that is a parent<->child context relation, and migration does not affect
* children, therefore these two orderings should not interact.
*
* The change in perf_event::ctx does not affect children (as claimed above)
* because the sys_perf_event_open() case will install a new event and break
* the ctx parent<->child relation, and perf_pmu_migrate_context() is only
* concerned with cpuctx and that doesn't have children.
*
* The places that change perf_event::ctx will issue:
*
* perf_remove_from_context();
* synchronize_rcu();
* perf_install_in_context();
*
* to affect the change. The remove_from_context() + synchronize_rcu() should
* quiesce the event, after which we can install it in the new location. This
* means that only external vectors (perf_fops, prctl) can perturb the event
* while in transit. Therefore all such accessors should also acquire
* perf_event_context::mutex to serialize against this.
*
* However; because event->ctx can change while we're waiting to acquire
* ctx->mutex we must be careful and use the below perf_event_ctx_lock()
* function.
*
* Lock order:
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event_context::lock
* perf_event::child_mutex;
* perf_event::mmap_mutex
* mmap_sem
*/
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
struct perf_event_context *ctx;
again:
rcu_read_lock();
ctx = ACCESS_ONCE(event->ctx);
if (!atomic_inc_not_zero(&ctx->refcount)) {
rcu_read_unlock();
goto again;
}
rcu_read_unlock();
mutex_lock_nested(&ctx->mutex, nesting);
if (event->ctx != ctx) {
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
goto again;
}
return ctx;
}
static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
return perf_event_ctx_lock_nested(event, 0);
}
static void perf_event_ctx_unlock(struct perf_event *event,
struct perf_event_context *ctx)
{
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
}
/*
* This must be done under the ctx->lock, such as to serialize against
* context_equiv(), therefore we cannot call put_ctx() since that might end up
@ -1155,8 +1243,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
ctx->nr_branch_stack++;
list_add_rcu(&event->event_entry, &ctx->event_list);
if (!ctx->nr_events)
perf_pmu_rotate_start(ctx->pmu);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
@ -1275,6 +1361,8 @@ static void perf_group_attach(struct perf_event *event)
if (group_leader == event)
return;
WARN_ON_ONCE(group_leader->ctx != event->ctx);
if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
!is_software_event(event))
group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
@ -1296,6 +1384,10 @@ static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
/*
* We can have double detach due to exit/hot-unplug + close.
*/
@ -1380,6 +1472,8 @@ static void perf_group_detach(struct perf_event *event)
/* Inherit group flags from the previous leader */
sibling->group_flags = event->group_flags;
WARN_ON_ONCE(sibling->ctx != event->ctx);
}
out:
@ -1442,6 +1536,10 @@ event_sched_out(struct perf_event *event,
{
u64 tstamp = perf_event_time(event);
u64 delta;
WARN_ON_ONCE(event->ctx != ctx);
lockdep_assert_held(&ctx->lock);
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
@ -1471,7 +1569,8 @@ event_sched_out(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
if (!--ctx->nr_active)
perf_event_ctx_deactivate(ctx);
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
@ -1654,7 +1753,7 @@ int __perf_event_disable(void *info)
* is the current context on this CPU and preemption is disabled,
* hence we can't get into perf_event_task_sched_out for this context.
*/
void perf_event_disable(struct perf_event *event)
static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@ -1695,6 +1794,19 @@ retry:
}
raw_spin_unlock_irq(&ctx->lock);
}
/*
* Strictly speaking kernel users cannot create groups and therefore this
* interface does not need the perf_event_ctx_lock() magic.
*/
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_disable(event);
perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
@ -1782,7 +1894,8 @@ event_sched_in(struct perf_event *event,
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
if (!ctx->nr_active++)
perf_event_ctx_activate(ctx);
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
@ -2158,7 +2271,7 @@ unlock:
* perf_event_for_each_child or perf_event_for_each as described
* for perf_event_disable.
*/
void perf_event_enable(struct perf_event *event)
static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@ -2214,9 +2327,21 @@ retry:
out:
raw_spin_unlock_irq(&ctx->lock);
}
/*
* See perf_event_disable();
*/
void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_enable(event);
perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
* not supported on inherited events
@ -2225,10 +2350,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
return -EINVAL;
atomic_add(refresh, &event->event_limit);
perf_event_enable(event);
_perf_event_enable(event);
return 0;
}
/*
* See perf_event_disable()
*/
int perf_event_refresh(struct perf_event *event, int refresh)
{
struct perf_event_context *ctx;
int ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_event_refresh(event, refresh);
perf_event_ctx_unlock(event, ctx);
return ret;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
@ -2612,12 +2752,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
/*
* Since these rotations are per-cpu, we need to ensure the
* cpu-context we got scheduled on is actually rotating.
*/
perf_pmu_rotate_start(ctx->pmu);
}
/*
@ -2905,25 +3039,18 @@ static void rotate_ctx(struct perf_event_context *ctx)
list_rotate_left(&ctx->flexible_groups);
}
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
*/
static int perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
int rotate = 0, remove = 1;
int rotate = 0;
if (cpuctx->ctx.nr_events) {
remove = 0;
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
rotate = 1;
}
ctx = cpuctx->task_ctx;
if (ctx && ctx->nr_events) {
remove = 0;
if (ctx->nr_events != ctx->nr_active)
rotate = 1;
}
@ -2947,8 +3074,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
if (remove)
list_del_init(&cpuctx->rotation_list);
return rotate;
}
@ -2966,9 +3091,8 @@ bool perf_event_can_stop_tick(void)
void perf_event_task_tick(void)
{
struct list_head *head = this_cpu_ptr(&rotation_list);
struct perf_cpu_context *cpuctx, *tmp;
struct perf_event_context *ctx;
struct list_head *head = this_cpu_ptr(&active_ctx_list);
struct perf_event_context *ctx, *tmp;
int throttled;
WARN_ON(!irqs_disabled());
@ -2976,14 +3100,8 @@ void perf_event_task_tick(void)
__this_cpu_inc(perf_throttled_seq);
throttled = __this_cpu_xchg(perf_throttled_count, 0);
list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
ctx = &cpuctx->ctx;
list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
perf_adjust_freq_unthr_context(ctx, throttled);
ctx = cpuctx->task_ctx;
if (ctx)
perf_adjust_freq_unthr_context(ctx, throttled);
}
}
static int event_enable_on_exec(struct perf_event *event,
@ -3142,6 +3260,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
{
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->active_ctx_list);
INIT_LIST_HEAD(&ctx->pinned_groups);
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
@ -3421,7 +3540,16 @@ static void perf_remove_from_owner(struct perf_event *event)
rcu_read_unlock();
if (owner) {
mutex_lock(&owner->perf_event_mutex);
/*
* If we're here through perf_event_exit_task() we're already
* holding ctx->mutex which would be an inversion wrt. the
* normal lock order.
*
* However we can safely take this lock because its the child
* ctx->mutex.
*/
mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
/*
* We have to re-check the event->owner field, if it is cleared
* we raced with perf_event_exit_task(), acquiring the mutex
@ -3440,7 +3568,7 @@ static void perf_remove_from_owner(struct perf_event *event)
*/
static void put_event(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event_context *ctx;
if (!atomic_long_dec_and_test(&event->refcount))
return;
@ -3448,7 +3576,6 @@ static void put_event(struct perf_event *event)
if (!is_kernel_event(event))
perf_remove_from_owner(event);
WARN_ON_ONCE(ctx->parent_ctx);
/*
* There are two ways this annotation is useful:
*
@ -3461,7 +3588,8 @@ static void put_event(struct perf_event *event)
* the last filedesc died, so there is no possibility
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
WARN_ON_ONCE(ctx->parent_ctx);
perf_remove_from_context(event, true);
mutex_unlock(&ctx->mutex);
@ -3547,12 +3675,13 @@ static int perf_event_read_group(struct perf_event *event,
u64 read_format, char __user *buf)
{
struct perf_event *leader = event->group_leader, *sub;
int n = 0, size = 0, ret = -EFAULT;
struct perf_event_context *ctx = leader->ctx;
u64 values[5];
int n = 0, size = 0, ret;
u64 count, enabled, running;
u64 values[5];
lockdep_assert_held(&ctx->mutex);
mutex_lock(&ctx->mutex);
count = perf_event_read_value(leader, &enabled, &running);
values[n++] = 1 + leader->nr_siblings;
@ -3567,7 +3696,7 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);
if (copy_to_user(buf, values, size))
goto unlock;
return -EFAULT;
ret = size;
@ -3581,14 +3710,11 @@ static int perf_event_read_group(struct perf_event *event,
size = n * sizeof(u64);
if (copy_to_user(buf + ret, values, size)) {
ret = -EFAULT;
goto unlock;
return -EFAULT;
}
ret += size;
}
unlock:
mutex_unlock(&ctx->mutex);
return ret;
}
@ -3660,8 +3786,14 @@ static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
int ret;
return perf_read_hw(event, buf, count);
ctx = perf_event_ctx_lock(event);
ret = perf_read_hw(event, buf, count);
perf_event_ctx_unlock(event, ctx);
return ret;
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
@ -3687,7 +3819,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
return events;
}
static void perf_event_reset(struct perf_event *event)
static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
@ -3706,6 +3838,7 @@ static void perf_event_for_each_child(struct perf_event *event,
struct perf_event *child;
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
func(event);
list_for_each_entry(child, &event->child_list, child_list)
@ -3719,14 +3852,13 @@ static void perf_event_for_each(struct perf_event *event,
struct perf_event_context *ctx = event->ctx;
struct perf_event *sibling;
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
lockdep_assert_held(&ctx->mutex);
event = event->group_leader;
perf_event_for_each_child(event, func);
list_for_each_entry(sibling, &event->sibling_list, group_entry)
perf_event_for_each_child(sibling, func);
mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
@ -3796,25 +3928,24 @@ static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
struct perf_event *event = file->private_data;
void (*func)(struct perf_event *);
u32 flags = arg;
switch (cmd) {
case PERF_EVENT_IOC_ENABLE:
func = perf_event_enable;
func = _perf_event_enable;
break;
case PERF_EVENT_IOC_DISABLE:
func = perf_event_disable;
func = _perf_event_disable;
break;
case PERF_EVENT_IOC_RESET:
func = perf_event_reset;
func = _perf_event_reset;
break;
case PERF_EVENT_IOC_REFRESH:
return perf_event_refresh(event, arg);
return _perf_event_refresh(event, arg);
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
@ -3861,6 +3992,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
long ret;
ctx = perf_event_ctx_lock(event);
ret = _perf_ioctl(event, cmd, arg);
perf_event_ctx_unlock(event, ctx);
return ret;
}
#ifdef CONFIG_COMPAT
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@ -3883,11 +4027,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
int perf_event_task_enable(void)
{
struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
list_for_each_entry(event, &current->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_enable);
list_for_each_entry(event, &current->perf_event_list, owner_entry) {
ctx = perf_event_ctx_lock(event);
perf_event_for_each_child(event, _perf_event_enable);
perf_event_ctx_unlock(event, ctx);
}
mutex_unlock(&current->perf_event_mutex);
return 0;
@ -3895,11 +4043,15 @@ int perf_event_task_enable(void)
int perf_event_task_disable(void)
{
struct perf_event_context *ctx;
struct perf_event *event;
mutex_lock(&current->perf_event_mutex);
list_for_each_entry(event, &current->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_disable);
list_for_each_entry(event, &current->perf_event_list, owner_entry) {
ctx = perf_event_ctx_lock(event);
perf_event_for_each_child(event, _perf_event_disable);
perf_event_ctx_unlock(event, ctx);
}
mutex_unlock(&current->perf_event_mutex);
return 0;
@ -5889,6 +6041,8 @@ end:
rcu_read_unlock();
}
DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
int perf_swevent_get_recursion_context(void)
{
struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
@ -5904,21 +6058,30 @@ inline void perf_swevent_put_recursion_context(int rctx)
put_recursion_context(swhash->recursion, rctx);
}
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data;
if (WARN_ON_ONCE(!regs))
return;
perf_sample_data_init(&data, addr, 0);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
}
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
int rctx;
preempt_disable_notrace();
rctx = perf_swevent_get_recursion_context();
if (rctx < 0)
return;
if (unlikely(rctx < 0))
goto fail;
perf_sample_data_init(&data, addr, 0);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
___perf_sw_event(event_id, nr, regs, addr);
perf_swevent_put_recursion_context(rctx);
fail:
preempt_enable_notrace();
}
@ -6780,7 +6943,6 @@ skip_type:
__perf_cpu_hrtimer_init(cpuctx, cpu);
INIT_LIST_HEAD(&cpuctx->rotation_list);
cpuctx->unique_pmu = pmu;
}
@ -6853,6 +7015,20 @@ void perf_pmu_unregister(struct pmu *pmu)
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
{
int ret;
if (!try_module_get(pmu->module))
return -ENODEV;
event->pmu = pmu;
ret = pmu->event_init(event);
if (ret)
module_put(pmu->module);
return ret;
}
struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
@ -6865,24 +7041,14 @@ struct pmu *perf_init_event(struct perf_event *event)
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
if (!try_module_get(pmu->module)) {
pmu = ERR_PTR(-ENODEV);
goto unlock;
}
event->pmu = pmu;
ret = pmu->event_init(event);
ret = perf_try_init_event(pmu, event);
if (ret)
pmu = ERR_PTR(ret);
goto unlock;
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
if (!try_module_get(pmu->module)) {
pmu = ERR_PTR(-ENODEV);
goto unlock;
}
event->pmu = pmu;
ret = pmu->event_init(event);
ret = perf_try_init_event(pmu, event);
if (!ret)
goto unlock;
@ -7246,6 +7412,15 @@ out:
return ret;
}
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
if (b < a)
swap(a, b);
mutex_lock(a);
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@ -7261,7 +7436,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event *group_leader = NULL, *output_event = NULL;
struct perf_event *event, *sibling;
struct perf_event_attr attr;
struct perf_event_context *ctx;
struct perf_event_context *ctx, *uninitialized_var(gctx);
struct file *event_file = NULL;
struct fd group = {NULL, 0};
struct task_struct *task = NULL;
@ -7459,43 +7634,68 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (move_group) {
struct perf_event_context *gctx = group_leader->ctx;
gctx = group_leader->ctx;
/*
* See perf_event_ctx_lock() for comments on the details
* of swizzling perf_event::ctx.
*/
mutex_lock_double(&gctx->mutex, &ctx->mutex);
mutex_lock(&gctx->mutex);
perf_remove_from_context(group_leader, false);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling, false);
put_ctx(gctx);
}
} else {
mutex_lock(&ctx->mutex);
}
WARN_ON_ONCE(ctx->parent_ctx);
if (move_group) {
/*
* Wait for everybody to stop referencing the events through
* the old lists, before installing it on new lists.
*/
synchronize_rcu();
/*
* Install the group siblings before the group leader.
*
* Because a group leader will try and install the entire group
* (through the sibling list, which is still in-tact), we can
* end up with siblings installed in the wrong context.
*
* By installing siblings first we NO-OP because they're not
* reachable through the group lists.
*/
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_event__state_init(sibling);
perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx);
}
/*
* Removing from the context ends up with disabled
* event. What we want here is event in the initial
* startup state, ready to be add into new context.
*/
perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_remove_from_context(sibling, false);
perf_event__state_init(sibling);
put_ctx(gctx);
}
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
if (move_group) {
synchronize_rcu();
perf_install_in_context(ctx, group_leader, group_leader->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
perf_install_in_context(ctx, sibling, sibling->cpu);
get_ctx(ctx);
}
}
perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
if (move_group) {
mutex_unlock(&gctx->mutex);
put_ctx(gctx);
}
mutex_unlock(&ctx->mutex);
put_online_cpus();
@ -7603,7 +7803,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
mutex_lock(&src_ctx->mutex);
/*
* See perf_event_ctx_lock() for comments on the details
* of swizzling perf_event::ctx.
*/
mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event, false);
@ -7611,11 +7815,36 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
put_ctx(src_ctx);
list_add(&event->migrate_entry, &events);
}
mutex_unlock(&src_ctx->mutex);
/*
* Wait for the events to quiesce before re-instating them.
*/
synchronize_rcu();
mutex_lock(&dst_ctx->mutex);
/*
* Re-instate events in 2 passes.
*
* Skip over group leaders and only install siblings on this first
* pass, siblings will not get enabled without a leader, however a
* leader will enable its siblings, even if those are still on the old
* context.
*/
list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
if (event->group_leader == event)
continue;
list_del(&event->migrate_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
account_event_cpu(event, dst_cpu);
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
/*
* Once all the siblings are setup properly, install the group leaders
* to make it go.
*/
list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
list_del(&event->migrate_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
@ -7625,6 +7854,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
@ -7811,14 +8041,19 @@ static void perf_free_event(struct perf_event *event,
put_event(parent);
raw_spin_lock_irq(&ctx->lock);
perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
free_event(event);
}
/*
* free an unexposed, unused context as created by inheritance by
* Free an unexposed, unused context as created by inheritance by
* perf_event_init_task below, used by fork() in case of fail.
*
* Not all locks are strictly required, but take them anyway to be nice and
* help out with the lockdep assertions.
*/
void perf_event_free_task(struct task_struct *task)
{
@ -8137,7 +8372,7 @@ static void __init perf_event_init_all_cpus(void)
for_each_possible_cpu(cpu) {
swhash = &per_cpu(swevent_htable, cpu);
mutex_init(&swhash->hlist_mutex);
INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
}
}
@ -8158,22 +8393,11 @@ static void perf_event_init_cpu(int cpu)
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
WARN_ON(!irqs_disabled());
list_del_init(&cpuctx->rotation_list);
}
static void __perf_event_exit_context(void *__info)
{
struct remove_event re = { .detach_group = true };
struct perf_event_context *ctx = __info;
perf_pmu_rotate_stop(ctx->pmu);
rcu_read_lock();
list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
__perf_remove_from_context(&re);


@ -13,12 +13,13 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
atomic_set(&handle->rb->poll, POLL_IN);
atomic_set(&handle->rb->poll, POLLIN);
handle->event->pending_wakeup = 1;
irq_work_queue(&handle->event->pending);


@ -1082,7 +1082,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
}
__set_task_cpu(p, new_cpu);


@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags)
}
void *perf_trace_buf_prepare(int size, unsigned short type,
struct pt_regs *regs, int *rctxp)
struct pt_regs **regs, int *rctxp)
{
struct trace_entry *entry;
unsigned long flags;
@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type,
if (*rctxp < 0)
return NULL;
if (regs)
*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
/* zero the dead bytes from align to not leak stack to user */


@ -1148,7 +1148,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
if (!entry)
return;
@ -1179,7 +1179,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
size = ALIGN(__size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
if (!entry)
return;


@ -574,7 +574,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
size -= sizeof(u32);
rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
sys_data->enter_event->event.type, regs, &rctx);
sys_data->enter_event->event.type, NULL, &rctx);
if (!rec)
return;
@ -647,7 +647,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
size -= sizeof(u32);
rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
sys_data->exit_event->event.type, regs, &rctx);
sys_data->exit_event->event.type, NULL, &rctx);
if (!rec)
return;


@ -1111,7 +1111,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
if (hlist_empty(head))
goto out;
entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
if (!entry)
goto out;


@ -1,3 +1,4 @@
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
@ -98,3 +99,45 @@ char *debugfs_mount(const char *mountpoint)
out:
return debugfs_mountpoint;
}
int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
{
char sbuf[128];
switch (err) {
case ENOENT:
if (debugfs_found) {
snprintf(buf, size,
"Error:\tFile %s/%s not found.\n"
"Hint:\tPerhaps this kernel misses some CONFIG_ setting to enable this feature?.\n",
debugfs_mountpoint, filename);
break;
}
snprintf(buf, size, "%s",
"Error:\tUnable to find debugfs\n"
"Hint:\tWas your kernel compiled with debugfs support?\n"
"Hint:\tIs the debugfs filesystem mounted?\n"
"Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
break;
case EACCES:
snprintf(buf, size,
"Error:\tNo permissions to read %s/%s\n"
"Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
debugfs_mountpoint, filename, debugfs_mountpoint);
break;
default:
snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
break;
}
return 0;
}
int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
{
char path[PATH_MAX];
snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
return debugfs__strerror_open(err, buf, size, path);
}
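
A hypothetical caller (function name and header path are assumptions) would
use the new helpers roughly like this when opening a tracepoint file fails:

    #include <stdio.h>
    #include <api/fs/debugfs.h>

    /* Sketch: print a readable hint when a tracepoint can't be opened. */
    static void report_tp_open_error(int saved_errno)
    {
            char errbuf[512];

            debugfs__strerror_open_tp(saved_errno, errbuf, sizeof(errbuf),
                                      "sched", "sched_switch");
            fprintf(stderr, "%s\n", errbuf);
    }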


@ -26,4 +26,7 @@ char *debugfs_mount(const char *mountpoint);
extern char debugfs_mountpoint[];
int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
#endif /* __API_DEBUGFS_H__ */


@ -32,6 +32,7 @@
#include <stdint.h>
#include <limits.h>
#include <netinet/ip6.h>
#include "event-parse.h"
#include "event-utils.h"
@ -4149,6 +4150,324 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
}
static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
{
const char *fmt;
if (i == 'i')
fmt = "%03d.%03d.%03d.%03d";
else
fmt = "%d.%d.%d.%d";
trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
}
static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
(unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
}
static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
{
return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
}
static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
{
int i, j, range;
unsigned char zerolength[8];
int longest = 1;
int colonpos = -1;
uint16_t word;
uint8_t hi, lo;
bool needcolon = false;
bool useIPv4;
struct in6_addr in6;
memcpy(&in6, addr, sizeof(struct in6_addr));
useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
memset(zerolength, 0, sizeof(zerolength));
if (useIPv4)
range = 6;
else
range = 8;
/* find position of longest 0 run */
for (i = 0; i < range; i++) {
for (j = i; j < range; j++) {
if (in6.s6_addr16[j] != 0)
break;
zerolength[i]++;
}
}
for (i = 0; i < range; i++) {
if (zerolength[i] > longest) {
longest = zerolength[i];
colonpos = i;
}
}
if (longest == 1) /* don't compress a single 0 */
colonpos = -1;
/* emit address */
for (i = 0; i < range; i++) {
if (i == colonpos) {
if (needcolon || i == 0)
trace_seq_printf(s, ":");
trace_seq_printf(s, ":");
needcolon = false;
i += longest - 1;
continue;
}
if (needcolon) {
trace_seq_printf(s, ":");
needcolon = false;
}
/* hex u16 without leading 0s */
word = ntohs(in6.s6_addr16[i]);
hi = word >> 8;
lo = word & 0xff;
if (hi)
trace_seq_printf(s, "%x%02x", hi, lo);
else
trace_seq_printf(s, "%x", lo);
needcolon = true;
}
if (useIPv4) {
if (needcolon)
trace_seq_printf(s, ":");
print_ip4_addr(s, 'I', &in6.s6_addr[12]);
}
return;
}
static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
{
int j;
for (j = 0; j < 16; j += 2) {
trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]);
if (i == 'I' && j < 14)
trace_seq_printf(s, ":");
}
}
/*
* %pi4 print an IPv4 address with leading zeros
* %pI4 print an IPv4 address without leading zeros
* %pi6 print an IPv6 address without colons
* %pI6 print an IPv6 address with colons
* %pI6c print an IPv6 address in compressed form with colons
* %pISpc print an IP address based on sockaddr; p adds port.
*/
static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
void *data, int size, struct event_format *event,
struct print_arg *arg)
{
unsigned char *buf;
if (arg->type == PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return 0;
}
if (arg->type != PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
return 0;
}
if (!arg->field.field) {
arg->field.field =
pevent_find_any_field(event, arg->field.name);
if (!arg->field.field) {
do_warning("%s: field %s not found",
__func__, arg->field.name);
return 0;
}
}
buf = data + arg->field.field->offset;
if (arg->field.field->size != 4) {
trace_seq_printf(s, "INVALIDIPv4");
return 0;
}
print_ip4_addr(s, i, buf);
return 0;
}
static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
void *data, int size, struct event_format *event,
struct print_arg *arg)
{
char have_c = 0;
unsigned char *buf;
int rc = 0;
/* pI6c */
if (i == 'I' && *ptr == 'c') {
have_c = 1;
ptr++;
rc++;
}
if (arg->type == PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return rc;
}
if (arg->type != PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
return rc;
}
if (!arg->field.field) {
arg->field.field =
pevent_find_any_field(event, arg->field.name);
if (!arg->field.field) {
do_warning("%s: field %s not found",
__func__, arg->field.name);
return rc;
}
}
buf = data + arg->field.field->offset;
if (arg->field.field->size != 16) {
trace_seq_printf(s, "INVALIDIPv6");
return rc;
}
if (have_c)
print_ip6c_addr(s, buf);
else
print_ip6_addr(s, i, buf);
return rc;
}
static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
void *data, int size, struct event_format *event,
struct print_arg *arg)
{
char have_c = 0, have_p = 0;
unsigned char *buf;
struct sockaddr_storage *sa;
int rc = 0;
/* pISpc */
if (i == 'I') {
if (*ptr == 'p') {
have_p = 1;
ptr++;
rc++;
}
if (*ptr == 'c') {
have_c = 1;
ptr++;
rc++;
}
}
if (arg->type == PRINT_FUNC) {
process_defined_func(s, data, size, event, arg);
return rc;
}
if (arg->type != PRINT_FIELD) {
trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
return rc;
}
if (!arg->field.field) {
arg->field.field =
pevent_find_any_field(event, arg->field.name);
if (!arg->field.field) {
do_warning("%s: field %s not found",
__func__, arg->field.name);
return rc;
}
}
sa = (struct sockaddr_storage *) (data + arg->field.field->offset);
if (sa->ss_family == AF_INET) {
struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
if (arg->field.field->size < sizeof(struct sockaddr_in)) {
trace_seq_printf(s, "INVALIDIPv4");
return rc;
}
print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
if (have_p)
trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
} else if (sa->ss_family == AF_INET6) {
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
if (arg->field.field->size < sizeof(struct sockaddr_in6)) {
trace_seq_printf(s, "INVALIDIPv6");
return rc;
}
if (have_p)
trace_seq_printf(s, "[");
buf = (unsigned char *) &sa6->sin6_addr;
if (have_c)
print_ip6c_addr(s, buf);
else
print_ip6_addr(s, i, buf);
if (have_p)
trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port));
}
return rc;
}
static int print_ip_arg(struct trace_seq *s, const char *ptr,
void *data, int size, struct event_format *event,
struct print_arg *arg)
{
char i = *ptr; /* 'i' or 'I' */
char ver;
int rc = 0;
ptr++;
rc++;
ver = *ptr;
ptr++;
rc++;
switch (ver) {
case '4':
rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
break;
case '6':
rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
break;
case 'S':
rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
break;
default:
return 0;
}
return rc;
}
static int is_printable_array(char *p, unsigned int len)
{
unsigned int i;
@ -4337,6 +4656,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
ptr++;
arg = arg->next;
break;
} else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
int n;
n = print_ip_arg(s, ptr+1, data, size, event, arg);
if (n > 0) {
ptr += n;
arg = arg->next;
break;
}
}
/* fall through */


@ -38,7 +38,7 @@ OPTIONS
--remove=::
Remove specified file from the cache.
-M::
--missing=::
--missing=::
List missing build ids in the cache for the specified file.
-u::
--update::


@ -89,6 +89,19 @@ raw encoding of 0x1A8 can be used:
You should refer to the processor specific documentation for getting these
details. Some of them are referenced in the SEE ALSO section below.
PARAMETERIZED EVENTS
--------------------
Some pmu events listed by 'perf-list' will be displayed with '?' in them. For
example:
hv_gpci/dtbp_ptitc,phys_processor_idx=?/
This means that when provided as an event, a value for '?' must
also be supplied. For example:
perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
OPTIONS
-------


@ -12,11 +12,12 @@ SYNOPSIS
DESCRIPTION
-----------
"perf mem -t <TYPE> record" runs a command and gathers memory operation data
"perf mem record" runs a command and gathers memory operation data
from it, into perf.data. Perf record options are accepted and are passed through.
"perf mem -t <TYPE> report" displays the result. It invokes perf report with the
right set of options to display a memory access profile.
"perf mem report" displays the result. It invokes perf report with the
right set of options to display a memory access profile. By default, loads
and stores are sampled. Use the -t option to limit to loads or stores.
Note that on Intel systems the memory latency reported is the use-latency,
not the pure load (or store latency). Use latency includes any pipeline
@ -29,7 +30,7 @@ OPTIONS
-t::
--type=::
Select the memory operation type: load or store (default: load)
Select the memory operation type: load or store (default: load,store)
-D::
--dump-raw-samples=::

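As a quick illustration of the default described above (workload name
hypothetical):

    perf mem record ./workload    # samples both loads and stores by default
    perf mem -t load report       # restrict the report to loads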

@ -33,12 +33,27 @@ OPTIONS
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
- a hardware breakpoint event in the form of '\mem:addr[:access]'
- a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
'param1', 'param2', etc are defined as formats for the PMU in
/sys/bus/event_sources/devices/<pmu>/format/*.
- a symbolically formed event like 'pmu/config=M,config1=N,config3=K/'
where M, N, K are numbers (in decimal, hex, octal format). Acceptable
values for each of 'config', 'config1' and 'config2' are defined by
corresponding entries in /sys/bus/event_sources/devices/<pmu>/format/*
param1 and param2 are defined as formats for the PMU in:
/sys/bus/event_sources/devices/<pmu>/format/*
- a hardware breakpoint event in the form of '\mem:addr[/len][:access]'
where addr is the address in memory you want to break in.
Access is the memory access type (read, write, execute) it can
be passed as follows: '\mem:addr[:[r][w][x]]'.
be passed as follows: '\mem:addr[:[r][w][x]]'. len is the range,
number of bytes from specified addr, which the breakpoint will cover.
If you want to profile read-write accesses in 0x1000, just set
'mem:0x1000:rw'.
If you want to profile write accesses in [0x1000~1008), just set
'mem:0x1000/8:w'.
--filter=<filter>::
Event filter.


@ -125,46 +125,46 @@ OPTIONS
is equivalent to:
perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
i.e., the specified fields apply to all event types if the type string
is not given.
The arguments are processed in the order received. A later usage can
reset a prior request. e.g.:
-f trace: -f comm,tid,time,ip,sym
The first -f suppresses trace events (field list is ""), but then the
second invocation sets the fields to comm,tid,time,ip,sym. In this case a
warning is given to the user:
"Overriding previous field request for all events."
Alternatively, consider the order:
-f comm,tid,time,ip,sym -f trace:
The first -f sets the fields for all events and the second -f
suppresses trace events. The user is given a warning message about
the override, and the result of the above is that only S/W and H/W
events are displayed with the given fields.
For the 'wildcard' option if a user selected field is invalid for an
event type, a message is displayed to the user that the option is
ignored for that type. For example:
$ perf script -f comm,tid,trace
'trace' not valid for hardware events. Ignoring.
'trace' not valid for software events. Ignoring.
Alternatively, if the type is given an invalid field is specified it
is an error. For example:
perf script -v -f sw:comm,tid,trace
'trace' not valid for software events.
At this point usage is displayed, and perf-script exits.
Finally, a user may not set fields to none for all event types.
i.e., -f "" is not allowed.


@ -25,10 +25,22 @@ OPTIONS
-e::
--event=::
Select the PMU event. Selection can be a symbolic event name
(use 'perf list' to list all events) or a raw PMU
event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
Select the PMU event. Selection can be:
- a symbolic event name (use 'perf list' to list all events)
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
- a symbolically formed event like 'pmu/param1=0x3,param2/' where
param1 and param2 are defined as formats for the PMU in
/sys/bus/event_sources/devices/<pmu>/format/*
- a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
where M, N, K are numbers (in decimal, hex, octal format).
Acceptable values for each of 'config', 'config1' and 'config2'
parameters are defined by corresponding entries in
/sys/bus/event_sources/devices/<pmu>/format/*
-i::
--no-inherit::

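For instance, combining the forms described above for -e/--event (encodings
illustrative; assumes an x86 'cpu' PMU exposing 'event' and 'umask' format
entries):

    perf stat -e 'cpu/event=0xc0,umask=0x00/' -a sleep 1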

@ -68,4 +68,17 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
val, opflags);
}
#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
#include <pthread.h>
static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
size_t cpusetsize,
cpu_set_t *cpuset)
{
attr = attr;
cpusetsize = cpusetsize;
cpuset = cpuset;
return 0;
}
#endif
#endif /* _FUTEX_H */

View File

@ -236,10 +236,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
if (errno == ENOENT)
return false;
pr_warning("Problems with %s file, consider removing it from the cache\n",
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
} else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
pr_warning("Problems with %s file, consider removing it from the cache\n",
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
}


@ -390,6 +390,15 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
}
}
static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt)
{
struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt);
void *ptr = dfmt - dfmt->idx;
struct data__file *d = container_of(ptr, struct data__file, fmt);
return d;
}
static struct hist_entry*
get_pair_data(struct hist_entry *he, struct data__file *d)
{
@ -407,8 +416,7 @@ get_pair_data(struct hist_entry *he, struct data__file *d)
static struct hist_entry*
get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
{
void *ptr = dfmt - dfmt->idx;
struct data__file *d = container_of(ptr, struct data__file, fmt);
struct data__file *d = fmt_to_data_file(&dfmt->fmt);
return get_pair_data(he, d);
}
@ -430,7 +438,7 @@ static void hists__baseline_only(struct hists *hists)
next = rb_next(&he->rb_node_in);
if (!hist_entry__next_pair(he)) {
rb_erase(&he->rb_node_in, root);
hist_entry__free(he);
hist_entry__delete(he);
}
}
}
@ -448,26 +456,30 @@ static void hists__precompute(struct hists *hists)
next = rb_first(root);
while (next != NULL) {
struct hist_entry *he, *pair;
struct data__file *d;
int i;
he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
pair = get_pair_data(he, &data__files[sort_compute]);
if (!pair)
continue;
data__for_each_file_new(i, d) {
pair = get_pair_data(he, d);
if (!pair)
continue;
switch (compute) {
case COMPUTE_DELTA:
compute_delta(he, pair);
break;
case COMPUTE_RATIO:
compute_ratio(he, pair);
break;
case COMPUTE_WEIGHTED_DIFF:
compute_wdiff(he, pair);
break;
default:
BUG_ON(1);
switch (compute) {
case COMPUTE_DELTA:
compute_delta(he, pair);
break;
case COMPUTE_RATIO:
compute_ratio(he, pair);
break;
case COMPUTE_WEIGHTED_DIFF:
compute_wdiff(he, pair);
break;
default:
BUG_ON(1);
}
}
}
}
@ -517,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
static int64_t
hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
int c)
int c, int sort_idx)
{
bool pairs_left = hist_entry__has_pairs(left);
bool pairs_right = hist_entry__has_pairs(right);
@ -529,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
if (!pairs_left || !pairs_right)
return pairs_left ? -1 : 1;
p_left = get_pair_data(left, &data__files[sort_compute]);
p_right = get_pair_data(right, &data__files[sort_compute]);
p_left = get_pair_data(left, &data__files[sort_idx]);
p_right = get_pair_data(right, &data__files[sort_idx]);
if (!p_left && !p_right)
return 0;
@ -546,90 +558,102 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
}
static int64_t
hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
int c, int sort_idx)
{
struct hist_entry *p_right, *p_left;
p_left = get_pair_data(left, &data__files[sort_idx]);
p_right = get_pair_data(right, &data__files[sort_idx]);
if (!p_left && !p_right)
return 0;
if (!p_left || !p_right)
return p_left ? -1 : 1;
if (c != COMPUTE_DELTA) {
/*
* The delta can be computed without the baseline, but the
* others cannot. Put the entries which have no values
* below.
*/
if (left->dummy && right->dummy)
return 0;
if (left->dummy || right->dummy)
return left->dummy ? 1 : -1;
}
return __hist_entry__cmp_compute(p_left, p_right, c);
}
static int64_t
hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left __maybe_unused,
struct hist_entry *right __maybe_unused)
{
return 0;
}
static int64_t
hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
if (sort_compute)
return 0;
if (left->stat.period == right->stat.period)
return 0;
return left->stat.period > right->stat.period ? 1 : -1;
}
static int64_t
hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
}
static int64_t
hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
}
static int64_t
hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
struct data__file *d = fmt_to_data_file(fmt);
return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
}
static void insert_hist_entry_by_compute(struct rb_root *root,
struct hist_entry *he,
int c)
static int64_t
hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node);
if (hist_entry__cmp_compute(he, iter, c) < 0)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, root);
return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
sort_compute);
}
static void hists__compute_resort(struct hists *hists)
static int64_t
hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
struct rb_root *root;
struct rb_node *next;
return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
sort_compute);
}
if (sort__need_collapse)
root = &hists->entries_collapsed;
else
root = hists->entries_in;
hists->entries = RB_ROOT;
next = rb_first(root);
hists__reset_stats(hists);
hists__reset_col_len(hists);
while (next != NULL) {
struct hist_entry *he;
he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
insert_hist_entry_by_compute(&hists->entries, he, compute);
hists__inc_stats(hists, he);
if (!he->filtered)
hists__calc_col_len(hists, he);
}
static int64_t
hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *left, struct hist_entry *right)
{
return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
sort_compute);
}
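These _idx variants compare entries against the data file selected by 'sort_compute', and are wired into the extra sort fmt created in ui_init() below. An illustrative invocation that sorts a multi-file diff on the delta column of the chosen data file (file names hypothetical):

    perf diff -c delta -o 1 perf.data.old perf.data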
static void hists__process(struct hists *hists)
@ -637,12 +661,8 @@ static void hists__process(struct hists *hists)
if (show_baseline_only)
hists__baseline_only(hists);
if (sort_compute) {
hists__precompute(hists);
hists__compute_resort(hists);
} else {
hists__output_resort(hists, NULL);
}
hists__precompute(hists);
hists__output_resort(hists, NULL);
hists__fprintf(hists, true, 0, 0, 0, stdout);
}
@ -841,7 +861,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
char pfmt[20] = " ";
if (!pair)
goto dummy_print;
goto no_print;
switch (comparison_method) {
case COMPUTE_DELTA:
@ -850,8 +870,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
else
diff = compute_delta(he, pair);
if (fabs(diff) < 0.01)
goto dummy_print;
scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
return percent_color_snprintf(hpp->buf, hpp->size,
pfmt, diff);
@ -882,6 +900,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
BUG_ON(1);
}
dummy_print:
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, "N/A");
no_print:
return scnprintf(hpp->buf, hpp->size, "%*s",
dfmt->header_width, pfmt);
}
@ -932,14 +953,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
else
diff = compute_delta(he, pair);
if (fabs(diff) >= 0.01)
scnprintf(buf, size, "%+4.2F%%", diff);
break;
case PERF_HPP_DIFF__RATIO:
/* No point for ratio number if we are dummy.. */
if (he->dummy)
if (he->dummy) {
scnprintf(buf, size, "N/A");
break;
}
if (pair->diff.computed)
ratio = pair->diff.period_ratio;
@ -952,8 +974,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
case PERF_HPP_DIFF__WEIGHTED_DIFF:
/* No point for wdiff number if we are dummy.. */
if (he->dummy)
if (he->dummy) {
scnprintf(buf, size, "N/A");
break;
}
if (pair->diff.computed)
wdiff = pair->diff.wdiff;
@ -1105,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx)
perf_hpp__register_sort_field(fmt);
}
static void ui_init(void)
static int ui_init(void)
{
struct data__file *d;
struct perf_hpp_fmt *fmt;
int i;
data__for_each_file(i, d) {
@ -1137,6 +1162,46 @@ static void ui_init(void)
data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
PERF_HPP_DIFF__PERIOD_BASELINE);
}
if (!sort_compute)
return 0;
/*
* Prepend an fmt to sort on columns at 'sort_compute' first.
* This fmt is added only to the sort list but not to the
* output fields list.
*
* Note that this column (data) can be compared twice - once
* for this 'sort_compute' fmt and again for the normal
* diff_hpp_fmt. But it shouldn't be a problem as most entries
* will be sorted out by the first try or the baseline, and
* comparing is not a costly operation.
*/
fmt = zalloc(sizeof(*fmt));
if (fmt == NULL) {
pr_err("Memory allocation failed\n");
return -1;
}
fmt->cmp = hist_entry__cmp_nop;
fmt->collapse = hist_entry__cmp_nop;
switch (compute) {
case COMPUTE_DELTA:
fmt->sort = hist_entry__cmp_delta_idx;
break;
case COMPUTE_RATIO:
fmt->sort = hist_entry__cmp_ratio_idx;
break;
case COMPUTE_WEIGHTED_DIFF:
fmt->sort = hist_entry__cmp_wdiff_idx;
break;
default:
BUG_ON(1);
}
list_add(&fmt->sort_list, &perf_hpp__sort_list);
return 0;
}
static int data_init(int argc, const char **argv)
@ -1202,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
if (data_init(argc, argv) < 0)
return -1;
ui_init();
if (ui_init() < 0)
return -1;
sort__mode = SORT_MODE__DIFF;


@ -343,6 +343,7 @@ static int __cmd_inject(struct perf_inject *inject)
int ret = -EINVAL;
struct perf_session *session = inject->session;
struct perf_data_file *file_out = &inject->output;
int fd = perf_data_file__fd(file_out);
signal(SIGINT, sig_handler);
@ -376,7 +377,7 @@ static int __cmd_inject(struct perf_inject *inject)
}
if (!file_out->is_pipe)
lseek(file_out->fd, session->header.data_offset, SEEK_SET);
lseek(fd, session->header.data_offset, SEEK_SET);
ret = perf_session__process_events(session, &inject->tool);
@ -385,7 +386,7 @@ static int __cmd_inject(struct perf_inject *inject)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
session->header.data_size = inject->bytes_written;
perf_session__write_header(session, session->evlist, file_out->fd, true);
perf_session__write_header(session, session->evlist, fd, true);
}
return ret;


@ -7,44 +7,47 @@
#include "util/session.h"
#include "util/data.h"
#define MEM_OPERATION_LOAD "load"
#define MEM_OPERATION_STORE "store"
static const char *mem_operation = MEM_OPERATION_LOAD;
#define MEM_OPERATION_LOAD 0x1
#define MEM_OPERATION_STORE 0x2
struct perf_mem {
struct perf_tool tool;
char const *input_name;
bool hide_unresolved;
bool dump_raw;
int operation;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};
static int __cmd_record(int argc, const char **argv)
static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
int rec_argc, i = 0, j;
const char **rec_argv;
char event[64];
int ret;
rec_argc = argc + 4;
rec_argc = argc + 7; /* max number of arguments */
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
rec_argv[i++] = strdup("record");
if (!strcmp(mem_operation, MEM_OPERATION_LOAD))
rec_argv[i++] = strdup("-W");
rec_argv[i++] = strdup("-d");
rec_argv[i++] = strdup("-e");
rec_argv[i++] = "record";
if (strcmp(mem_operation, MEM_OPERATION_LOAD))
sprintf(event, "cpu/mem-stores/pp");
else
sprintf(event, "cpu/mem-loads/pp");
if (mem->operation & MEM_OPERATION_LOAD)
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
if (mem->operation & MEM_OPERATION_LOAD) {
rec_argv[i++] = "-e";
rec_argv[i++] = "cpu/mem-loads/pp";
}
if (mem->operation & MEM_OPERATION_STORE) {
rec_argv[i++] = "-e";
rec_argv[i++] = "cpu/mem-stores/pp";
}
rec_argv[i++] = strdup(event);
for (j = 1; j < argc; j++, i++)
rec_argv[i] = argv[j];
@ -162,17 +165,17 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
if (!rep_argv)
return -1;
rep_argv[i++] = strdup("report");
rep_argv[i++] = strdup("--mem-mode");
rep_argv[i++] = strdup("-n"); /* display number of samples */
rep_argv[i++] = "report";
rep_argv[i++] = "--mem-mode";
rep_argv[i++] = "-n"; /* display number of samples */
/*
* there is no weight (cost) associated with stores, so don't print
* the column
*/
if (strcmp(mem_operation, MEM_OPERATION_LOAD))
rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr,"
"dso_daddr,tlb,locked");
if (!(mem->operation & MEM_OPERATION_LOAD))
rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
"dso_daddr,tlb,locked";
for (j = 1; j < argc; j++, i++)
rep_argv[i] = argv[j];
@ -182,6 +185,75 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
return ret;
}
struct mem_mode {
const char *name;
int mode;
};
#define MEM_OPT(n, m) \
{ .name = n, .mode = (m) }
#define MEM_END { .name = NULL }
static const struct mem_mode mem_modes[] = {
MEM_OPT("load", MEM_OPERATION_LOAD),
MEM_OPT("store", MEM_OPERATION_STORE),
MEM_END
};
static int
parse_mem_ops(const struct option *opt, const char *str, int unset)
{
int *mode = (int *)opt->value;
const struct mem_mode *m;
char *s, *os = NULL, *p;
int ret = -1;
if (unset)
return 0;
/* str may be NULL in case no arg is passed to -t */
if (str) {
/* because str is read-only */
s = os = strdup(str);
if (!s)
return -1;
/* reset mode */
*mode = 0;
for (;;) {
p = strchr(s, ',');
if (p)
*p = '\0';
for (m = mem_modes; m->name; m++) {
if (!strcasecmp(s, m->name))
break;
}
if (!m->name) {
fprintf(stderr, "unknown sampling op %s,"
" check man page\n", s);
goto error;
}
*mode |= m->mode;
if (!p)
break;
s = p + 1;
}
}
ret = 0;
if (*mode == 0)
*mode = MEM_OPERATION_LOAD;
error:
free(os);
return ret;
}
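parse_mem_ops() walks a comma-separated list, ORs in the matching MEM_OPERATION_* bits, and falls back to load-only when the parsed list ends up empty. Illustrative command lines this parser accepts:

    perf mem -t load,store record -a sleep 5
    perf mem -t store report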
int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct stat st;
@ -197,10 +269,15 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
.ordered_events = true,
},
.input_name = "perf.data",
/*
* default to both load and store sampling
*/
.operation = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
};
const struct option mem_options[] = {
OPT_STRING('t', "type", &mem_operation,
"type", "memory operations(load/store)"),
OPT_CALLBACK('t', "type", &mem.operation,
"type", "memory operations(load,store) Default load,store",
parse_mem_ops),
OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
"dump raw samples in ASCII"),
OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
@ -225,7 +302,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation))
if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
usage_with_options(mem_usage, mem_options);
if (!mem.input_name || !strlen(mem.input_name)) {
@ -236,7 +313,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
}
if (!strncmp(argv[0], "rec", 3))
return __cmd_record(argc, argv);
return __cmd_record(argc, argv, &mem);
else if (!strncmp(argv[0], "rep", 3))
return report_events(argc, argv, &mem);
else


@ -190,16 +190,30 @@ out:
return rc;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct record *rec = container_of(tool, struct record, tool);
rec->samples++;
return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
static int process_buildids(struct record *rec)
{
struct perf_data_file *file = &rec->file;
struct perf_session *session = rec->session;
u64 start = session->header.data_offset;
u64 size = lseek(file->fd, 0, SEEK_CUR);
u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
if (size == 0)
return 0;
file->size = size;
/*
* During this process, it'll load the kernel map and replace the
* dso->long_name with a real pathname it found. In this case
@ -211,9 +225,7 @@ static int process_buildids(struct record *rec)
*/
symbol_conf.ignore_vmlinux_buildid = true;
return __perf_session__process_events(session, start,
size - start,
size, &build_id__mark_dso_hit_ops);
return perf_session__process_events(session, &rec->tool);
}
static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
@ -322,6 +334,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
struct perf_data_file *file = &rec->file;
struct perf_session *session;
bool disabled = false, draining = false;
int fd;
rec->progname = argv[0];
@ -336,6 +349,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
return -1;
}
fd = perf_data_file__fd(file);
rec->session = session;
record__init_features(rec);
@ -360,12 +374,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
if (file->is_pipe) {
err = perf_header__write_pipe(file->fd);
err = perf_header__write_pipe(fd);
if (err < 0)
goto out_child;
} else {
err = perf_session__write_header(session, rec->evlist,
file->fd, false);
err = perf_session__write_header(session, rec->evlist, fd, false);
if (err < 0)
goto out_child;
}
@ -397,7 +410,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
* return this more properly and also
* propagate errors that now are calling die()
*/
err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
process_synthesized_event);
if (err <= 0) {
pr_err("Couldn't record tracing data.\n");
@ -504,19 +517,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
goto out_child;
}
if (!quiet) {
if (!quiet)
fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
/*
* Approximate RIP event size: 24 bytes.
*/
fprintf(stderr,
"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
(double)rec->bytes_written / 1024.0 / 1024.0,
file->path,
rec->bytes_written / 24);
}
out_child:
if (forks) {
int exit_status;
@ -535,13 +538,29 @@ out_child:
} else
status = err;
/* this will be recalculated during process_buildids() */
rec->samples = 0;
if (!err && !file->is_pipe) {
rec->session->header.data_size += rec->bytes_written;
if (!rec->no_buildid)
process_buildids(rec);
perf_session__write_header(rec->session, rec->evlist,
file->fd, true);
perf_session__write_header(rec->session, rec->evlist, fd, true);
}
if (!err && !quiet) {
char samples[128];
if (rec->samples)
scnprintf(samples, sizeof(samples),
" (%" PRIu64 " samples)", rec->samples);
else
samples[0] = '\0';
fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
perf_data_file__size(file) / 1024.0 / 1024.0,
file->path, samples);
}
out_delete_session:
@ -720,6 +739,13 @@ static struct record record = {
.default_per_cpu = true,
},
},
.tool = {
.sample = process_sample_event,
.fork = perf_event__process_fork,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
},
};
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
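With the sample/fork/comm/mmap handlers wired into record's tool, rec->samples is recounted from the actual PERF_RECORD_SAMPLE records seen while processing build-ids, replacing the old 24-bytes-per-event estimate. The closing summary then looks like this (values illustrative):

    [ perf record: Captured and wrote 0.213 MB perf.data (37 samples) ]

When build-ids are not processed, the sample count is simply omitted.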


@ -86,17 +86,6 @@ static int report__config(const char *var, const char *value, void *cb)
return perf_default_config(var, value, cb);
}
static void report__inc_stats(struct report *rep, struct hist_entry *he)
{
/*
* The @he is either a newly created one or an existing one
* merging the current sample. We only want to count a new one, so
* check that ->nr_events is 1.
*/
if (he->stat.nr_events == 1)
rep->nr_entries++;
}
static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct addr_location *al, bool single,
void *arg)
@ -108,8 +97,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct mem_info *mi;
struct branch_info *bi;
report__inc_stats(rep, he);
if (!ui__has_annotation())
return 0;
@ -499,6 +486,9 @@ static int __cmd_report(struct report *rep)
report__warn_kptr_restrict(rep);
evlist__for_each(session->evlist, pos)
rep->nr_entries += evsel__hists(pos)->nr_entries;
if (use_browser == 0) {
if (verbose > 3)
perf_session__fprintf(session, stdout);


@ -1730,7 +1730,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
"detailed run - start a lot of events"),
OPT_BOOLEAN('S', "sync", &sync_run,
"call sync() before starting a run"),
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
OPT_STRING('C', "cpu", &target.cpu_list, "cpu",


@ -165,7 +165,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
err ? "[unknown]" : uts.release, perf_version_string);
if (use_browser <= 0)
sleep(5);
map->erange_warned = true;
}


@ -929,66 +929,66 @@ static struct syscall_fmt {
.arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
{ .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
{ .name = "close", .errmsg = true,
.arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
{ .name = "connect", .errmsg = true, },
{ .name = "dup", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "dup2", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "dup3", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
{ .name = "eventfd2", .errmsg = true,
.arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
{ .name = "faccessat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "fadvise64", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fallocate", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchdir", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchmod", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchmodat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "fchown", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fchownat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "fcntl", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[1] = SCA_STRARRAY, /* cmd */ },
.arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
{ .name = "fdatasync", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "flock", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[1] = SCA_FLOCK, /* cmd */ }, },
{ .name = "fsetxattr", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fstat", .errmsg = true, .alias = "newfstat",
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fstatat", .errmsg = true, .alias = "newfstatat",
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "fstatfs", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "fsync", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "ftruncate", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "futex", .errmsg = true,
.arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
{ .name = "futimesat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "getdents", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "getdents64", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "ioctl", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
@ -1002,7 +1002,7 @@ static struct syscall_fmt {
{ .name = "kill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "linkat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "lseek", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
[2] = SCA_STRARRAY, /* whence */ },
@ -1012,9 +1012,9 @@ static struct syscall_fmt {
.arg_scnprintf = { [0] = SCA_HEX, /* start */
[2] = SCA_MADV_BHV, /* behavior */ }, },
{ .name = "mkdirat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "mknodat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "mlock", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mlockall", .errmsg = true,
@ -1036,9 +1036,9 @@ static struct syscall_fmt {
{ .name = "munmap", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "name_to_handle_at", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "newfstatat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "open", .errmsg = true,
.arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "open_by_handle_at", .errmsg = true,
@ -1052,20 +1052,20 @@ static struct syscall_fmt {
{ .name = "poll", .errmsg = true, .timeout = true, },
{ .name = "ppoll", .errmsg = true, .timeout = true, },
{ .name = "pread", .errmsg = true, .alias = "pread64",
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "preadv", .errmsg = true, .alias = "pread",
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
{ .name = "pwrite", .errmsg = true, .alias = "pwrite64",
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "pwritev", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "read", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "readlinkat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "readv", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "recvfrom", .errmsg = true,
.arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "recvmmsg", .errmsg = true,
@ -1073,7 +1073,7 @@ static struct syscall_fmt {
{ .name = "recvmsg", .errmsg = true,
.arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "renameat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "rt_sigaction", .errmsg = true,
.arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
{ .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@ -1091,7 +1091,7 @@ static struct syscall_fmt {
{ .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
{ .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "shutdown", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "socket", .errmsg = true,
.arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
[1] = SCA_SK_TYPE, /* type */ },
@ -1102,7 +1102,7 @@ static struct syscall_fmt {
.arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
{ .name = "stat", .errmsg = true, .alias = "newstat", },
{ .name = "symlinkat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "tgkill", .errmsg = true,
.arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
{ .name = "tkill", .errmsg = true,
@ -1113,9 +1113,9 @@ static struct syscall_fmt {
{ .name = "utimensat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
{ .name = "write", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
{ .name = "writev", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
@ -1191,7 +1191,7 @@ static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
if (thread__priv(thread) == NULL)
thread__set_priv(thread, thread_trace__new());
if (thread__priv(thread) == NULL)
goto fail;
@ -2056,23 +2056,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->trace_syscalls &&
perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
trace__sys_exit))
goto out_error_tp;
goto out_error_raw_syscalls;
if (trace->trace_syscalls)
perf_evlist__add_vfs_getname(evlist);
if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ))
goto out_error_tp;
perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
goto out_error_mem;
}
if ((trace->trace_pgfaults & TRACE_PFMIN) &&
perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
goto out_error_tp;
goto out_error_mem;
if (trace->sched &&
perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
trace__sched_stat_runtime))
goto out_error_tp;
perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
trace__sched_stat_runtime))
goto out_error_sched_stat_runtime;
err = perf_evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
@ -2202,8 +2203,12 @@ out:
{
char errbuf[BUFSIZ];
out_error_tp:
perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
out_error_sched_stat_runtime:
debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
goto out_error;
out_error_raw_syscalls:
debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
goto out_error;
out_error_mmap:
@ -2217,6 +2222,9 @@ out_error:
fprintf(trace->output, "%s\n", errbuf);
goto out_delete_evlist;
}
out_error_mem:
fprintf(trace->output, "Not enough memory to run!\n");
goto out_delete_evlist;
}
static int trace__replay(struct trace *trace)


@ -198,6 +198,7 @@ CORE_FEATURE_TESTS = \
libpython-version \
libslang \
libunwind \
pthread-attr-setaffinity-np \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
@ -226,6 +227,7 @@ VF_FEATURE_TESTS = \
libelf-getphdrnum \
libelf-mmap \
libpython-version \
pthread-attr-setaffinity-np \
stackprotector-all \
timerfd \
libunwind-debug-frame \
@ -301,6 +303,10 @@ ifeq ($(feature-sync-compare-and-swap), 1)
CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
endif
ifeq ($(feature-pthread-attr-setaffinity-np), 1)
CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
endif
ifndef NO_BIONIC
$(call feature_check,bionic)
ifeq ($(feature-bionic), 1)


@ -25,6 +25,7 @@ FILES= \
test-libslang.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
test-pthread-attr-setaffinity-np.bin \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
@ -47,6 +48,9 @@ test-all.bin:
test-hello.bin:
$(BUILD)
test-pthread-attr-setaffinity-np.bin:
$(BUILD) -Werror -lpthread
test-stackprotector-all.bin:
$(BUILD) -Werror -fstack-protector-all


@ -97,6 +97,10 @@
# include "test-zlib.c"
#undef main
#define main main_test_pthread_attr_setaffinity_np
# include "test-pthread_attr_setaffinity_np.c"
#undef main
int main(int argc, char *argv[])
{
main_test_libpython();
@ -121,6 +125,7 @@ int main(int argc, char *argv[])
main_test_libdw_dwarf_unwind();
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
return 0;
}


@ -0,0 +1,14 @@
#include <stdint.h>
#include <pthread.h>
int main(void)
{
int ret = 0;
pthread_attr_t thread_attr;
pthread_attr_init(&thread_attr);
/* don't care about exact args, just the API itself in libpthread */
ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
return ret;
}


@ -104,7 +104,6 @@ class Event(dict):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]


@ -140,7 +140,7 @@ static void del_hist_entries(struct hists *hists)
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase(node, root_out);
rb_erase(&he->rb_node_in, root_in);
hist_entry__free(he);
hist_entry__delete(he);
}
}


@ -106,7 +106,7 @@ static void del_hist_entries(struct hists *hists)
he = rb_entry(node, struct hist_entry, rb_node);
rb_erase(node, root_out);
rb_erase(&he->rb_node_in, root_in);
hist_entry__free(he);
hist_entry__delete(he);
}
}


@ -222,7 +222,6 @@ tarpkg:
@cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1
all: $(run) $(run_O) tarpkg
@echo OK


@ -1145,6 +1145,49 @@ static int test__pinned_group(struct perf_evlist *evlist)
return 0;
}
static int test__checkevent_breakpoint_len(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
evsel->attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
evsel->attr.bp_len);
return 0;
}
static int test__checkevent_breakpoint_len_w(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
evsel->attr.bp_type);
TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
evsel->attr.bp_len);
return 0;
}
static int
test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
return test__checkevent_breakpoint_rw(evlist);
}
static int count_tracepoints(void)
{
char events_path[PATH_MAX];
@ -1420,6 +1463,21 @@ static struct evlist_test test__events[] = {
.check = test__pinned_group,
.id = 41,
},
{
.name = "mem:0/1",
.check = test__checkevent_breakpoint_len,
.id = 42,
},
{
.name = "mem:0/2:w",
.check = test__checkevent_breakpoint_len_w,
.id = 43,
},
{
.name = "mem:0/4:rw:u",
.check = test__checkevent_breakpoint_len_rw_modifier,
.id = 44
},
#if defined(__s390x__)
{
.name = "kvm-s390:kvm_s390_create_vm",
@ -1471,7 +1529,7 @@ static int test_event(struct evlist_test *e)
} else {
ret = e->check(evlist);
}
perf_evlist__delete(evlist);
return ret;


@ -110,7 +110,7 @@ static bool samples_same(const struct perf_sample *s1,
if (type & PERF_SAMPLE_STACK_USER) {
COMP(user_stack.size);
if (memcmp(s1->user_stack.data, s1->user_stack.data,
if (memcmp(s1->user_stack.data, s2->user_stack.data,
s1->user_stack.size)) {
pr_debug("Samples differ at 'user_stack'\n");
return false;


@ -517,7 +517,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
}
annotate_browser__set_top(browser, dl, idx);
return true;
}
@ -867,7 +867,6 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
++browser->nr_jumps;
}
}
static inline int width_jumps(int n)


@ -285,7 +285,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
}
#define __HPP_SORT_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort(a, b, he_get_##_field); \
}
@ -312,7 +313,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
}
#define __HPP_SORT_ACC_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort_acc(a, b, he_get_acc_##_field); \
}
@ -331,7 +333,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt, \
}
#define __HPP_SORT_RAW_FN(_type, _field) \
static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct hist_entry *a, struct hist_entry *b) \
{ \
return __hpp__sort(a, b, he_get_raw_##_field); \
}
@ -361,7 +364,8 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period)
HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)
static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
struct hist_entry *a __maybe_unused,
struct hist_entry *b __maybe_unused)
{
return 0;


@ -4,12 +4,12 @@
#include <linux/types.h>
void ui_progress__finish(void);
struct ui_progress {
const char *title;
u64 curr, next, step, total;
};
void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
void ui_progress__update(struct ui_progress *p, u64 adv);


@ -9,6 +9,7 @@
#include "../libslang.h"
char ui_helpline__last_msg[1024];
bool tui_helpline__set;
static void tui_helpline__pop(void)
{
@ -35,6 +36,8 @@ static int tui_helpline__show(const char *format, va_list ap)
sizeof(ui_helpline__last_msg) - backlog, format, ap);
backlog += ret;
tui_helpline__set = true;
if (ui_helpline__last_msg[backlog - 1] == '\n') {
ui_helpline__puts(ui_helpline__last_msg);
SLsmg_refresh();


@ -17,6 +17,7 @@
static volatile int ui__need_resize;
extern struct perf_error_ops perf_tui_eops;
extern bool tui_helpline__set;
extern void hist_browser__init_hpp(void);
@ -159,7 +160,7 @@ out:
void ui__exit(bool wait_for_ok)
{
if (wait_for_ok)
if (wait_for_ok && tui_helpline__set)
ui__question_window("Fatal Error",
ui_helpline__last_msg,
"Press any key...", 0);


@ -239,7 +239,7 @@ static int mov__parse(struct ins_operands *ops)
*s = '\0';
ops->source.raw = strdup(ops->raw);
*s = ',';
if (ops->source.raw == NULL)
return -1;


@ -5,132 +5,6 @@
int perf_use_color_default = -1;
static int parse_color(const char *name, int len)
{
static const char * const color_names[] = {
"normal", "black", "red", "green", "yellow",
"blue", "magenta", "cyan", "white"
};
char *end;
int i;
for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
const char *str = color_names[i];
if (!strncasecmp(name, str, len) && !str[len])
return i - 1;
}
i = strtol(name, &end, 10);
if (end - name == len && i >= -1 && i <= 255)
return i;
return -2;
}
static int parse_attr(const char *name, int len)
{
static const int attr_values[] = { 1, 2, 4, 5, 7 };
static const char * const attr_names[] = {
"bold", "dim", "ul", "blink", "reverse"
};
unsigned int i;
for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
const char *str = attr_names[i];
if (!strncasecmp(name, str, len) && !str[len])
return attr_values[i];
}
return -1;
}
void color_parse(const char *value, const char *var, char *dst)
{
color_parse_mem(value, strlen(value), var, dst);
}
void color_parse_mem(const char *value, int value_len, const char *var,
char *dst)
{
const char *ptr = value;
int len = value_len;
int attr = -1;
int fg = -2;
int bg = -2;
if (!strncasecmp(value, "reset", len)) {
strcpy(dst, PERF_COLOR_RESET);
return;
}
/* [fg [bg]] [attr] */
while (len > 0) {
const char *word = ptr;
int val, wordlen = 0;
while (len > 0 && !isspace(word[wordlen])) {
wordlen++;
len--;
}
ptr = word + wordlen;
while (len > 0 && isspace(*ptr)) {
ptr++;
len--;
}
val = parse_color(word, wordlen);
if (val >= -1) {
if (fg == -2) {
fg = val;
continue;
}
if (bg == -2) {
bg = val;
continue;
}
goto bad;
}
val = parse_attr(word, wordlen);
if (val < 0 || attr != -1)
goto bad;
attr = val;
}
if (attr >= 0 || fg >= 0 || bg >= 0) {
int sep = 0;
*dst++ = '\033';
*dst++ = '[';
if (attr >= 0) {
*dst++ = '0' + attr;
sep++;
}
if (fg >= 0) {
if (sep++)
*dst++ = ';';
if (fg < 8) {
*dst++ = '3';
*dst++ = '0' + fg;
} else {
dst += sprintf(dst, "38;5;%d", fg);
}
}
if (bg >= 0) {
if (sep++)
*dst++ = ';';
if (bg < 8) {
*dst++ = '4';
*dst++ = '0' + bg;
} else {
dst += sprintf(dst, "48;5;%d", bg);
}
}
*dst++ = 'm';
}
*dst = 0;
return;
bad:
die("bad color value '%.*s' for variable '%s'", value_len, value, var);
}
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
{
if (value) {


@ -30,8 +30,6 @@ extern int perf_use_color_default;
int perf_color_default_config(const char *var, const char *value, void *cb);
int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
void color_parse(const char *value, const char *var, char *dst);
void color_parse_mem(const char *value, int len, const char *var, char *dst);
int color_vsnprintf(char *bf, size_t size, const char *color,
const char *fmt, va_list args);
int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);


@ -532,12 +532,8 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
break;
cache_offset = offset & DSO__DATA_CACHE_MASK;
ret = -EINVAL;
if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
break;
ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
if (ret <= 0)
break;


@ -139,6 +139,7 @@ struct dso {
u32 status_seen;
size_t file_size;
struct list_head open_entry;
u64 frame_offset;
} data;
union { /* Tool specific area */


@ -1436,33 +1436,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
return printed + fprintf(fp, "\n");
}
int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
int err, char *buf, size_t size)
{
char sbuf[128];
switch (err) {
case ENOENT:
scnprintf(buf, size, "%s",
"Error:\tUnable to find debugfs\n"
"Hint:\tWas your kernel compiled with debugfs support?\n"
"Hint:\tIs the debugfs filesystem mounted?\n"
"Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
break;
case EACCES:
scnprintf(buf, size,
"Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
"Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
debugfs_mountpoint, debugfs_mountpoint);
break;
default:
scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
break;
}
return 0;
}
int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
int err, char *buf, size_t size)
{

View File

@ -183,7 +183,6 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);


@ -709,6 +709,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
if (opts->sample_weight)
perf_evsel__set_sample_bit(evsel, WEIGHT);
attr->task = track;
attr->mmap = track;
attr->mmap2 = track && !perf_missing_features.mmap2;
attr->comm = track;
@ -797,6 +798,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
if (ncpus == 0 || nthreads == 0)
return 0;
if (evsel->system_wide)
nthreads = 1;


@ -2237,6 +2237,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
* - unique number to identify actual perf.data files
* - encode endianness of file
*/
ph->version = PERF_HEADER_VERSION_2;
/* check magic number with one endianness */
if (magic == __perf_magic2)
@ -2247,7 +2248,6 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
return -1;
ph->needs_swap = true;
ph->version = PERF_HEADER_VERSION_2;
return 0;
}


@ -241,6 +241,20 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
rb_erase(&he->rb_node, &hists->entries);
if (sort__need_collapse)
rb_erase(&he->rb_node_in, &hists->entries_collapsed);
--hists->nr_entries;
if (!he->filtered)
--hists->nr_non_filtered_entries;
hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
struct rb_node *next = rb_first(&hists->entries);
@ -258,16 +272,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
(zap_kernel && n->level != '.') ||
hists__decay_entry(hists, n)) &&
!n->used) {
rb_erase(&n->rb_node, &hists->entries);
if (sort__need_collapse)
rb_erase(&n->rb_node_in, &hists->entries_collapsed);
--hists->nr_entries;
if (!n->filtered)
--hists->nr_non_filtered_entries;
hist_entry__free(n);
hists__delete_entry(hists, n);
}
}
}
@ -281,16 +286,7 @@ void hists__delete_entries(struct hists *hists)
n = rb_entry(next, struct hist_entry, rb_node);
next = rb_next(&n->rb_node);
rb_erase(&n->rb_node, &hists->entries);
if (sort__need_collapse)
rb_erase(&n->rb_node_in, &hists->entries_collapsed);
--hists->nr_entries;
if (!n->filtered)
--hists->nr_non_filtered_entries;
hist_entry__free(n);
hists__delete_entry(hists, n);
}
}
@ -433,6 +429,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
if (!he)
return NULL;
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
@ -915,7 +913,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
if (perf_hpp__should_skip(fmt))
continue;
cmp = fmt->cmp(left, right);
cmp = fmt->cmp(fmt, left, right);
if (cmp)
break;
}
@ -933,7 +931,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
if (perf_hpp__should_skip(fmt))
continue;
cmp = fmt->collapse(left, right);
cmp = fmt->collapse(fmt, left, right);
if (cmp)
break;
}
@ -941,7 +939,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
return cmp;
}
void hist_entry__free(struct hist_entry *he)
void hist_entry__delete(struct hist_entry *he)
{
zfree(&he->branch_info);
zfree(&he->mem_info);
@ -981,7 +979,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
iter->callchain,
he->callchain);
}
hist_entry__free(he);
hist_entry__delete(he);
return false;
}
@ -1063,7 +1061,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
if (perf_hpp__should_skip(fmt))
continue;
cmp = fmt->sort(a, b);
cmp = fmt->sort(fmt, a, b);
if (cmp)
break;
}


@ -119,7 +119,7 @@ int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
void hist_entry__free(struct hist_entry *);
void hist_entry__delete(struct hist_entry *he);
void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
@ -195,9 +195,12 @@ struct perf_hpp_fmt {
struct hist_entry *he);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b);
int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b);
int64_t (*sort)(struct hist_entry *a, struct hist_entry *b);
int64_t (*cmp)(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b);
int64_t (*collapse)(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b);
int64_t (*sort)(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b);
struct list_head list;
struct list_head sort_list;
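Threading the perf_hpp_fmt pointer through cmp/collapse/sort lets a callback recover per-column state via container_of(), as builtin-diff.c's fmt_to_data_file() does above. A minimal sketch of the pattern (the wrapper struct and comparator are hypothetical):

    struct my_fmt {
        struct perf_hpp_fmt fmt;
        int idx;                          /* per-column state */
    };

    static int64_t my_sort(struct perf_hpp_fmt *fmt,
                           struct hist_entry *a, struct hist_entry *b)
    {
        struct my_fmt *mf = container_of(fmt, struct my_fmt, fmt);

        return my_compare(a, b, mf->idx); /* hypothetical comparator */
    }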


@ -526,7 +526,7 @@ do { \
}
int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type)
void *ptr, char *type, u64 len)
{
struct perf_event_attr attr;
@ -536,14 +536,15 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
if (parse_breakpoint_type(type, &attr))
return -EINVAL;
/*
* We should find a nice way to override the access length
* Provide some defaults for now
*/
if (attr.bp_type == HW_BREAKPOINT_X)
attr.bp_len = sizeof(long);
else
attr.bp_len = HW_BREAKPOINT_LEN_4;
/* Provide some defaults if len is not specified */
if (!len) {
if (attr.bp_type == HW_BREAKPOINT_X)
len = sizeof(long);
else
len = HW_BREAKPOINT_LEN_4;
}
attr.bp_len = len;
attr.type = PERF_TYPE_BREAKPOINT;
attr.sample_period = 1;
@ -1121,7 +1122,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
return;
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
if (subsys_glob != NULL &&
!strglobmatch(sys_dirent.d_name, subsys_glob))
continue;
@ -1132,7 +1133,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
continue;
for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
if (event_glob != NULL &&
!strglobmatch(evt_dirent.d_name, event_glob))
continue;
@ -1305,7 +1306,7 @@ static void print_symbol_events(const char *event_glob, unsigned type,
for (i = 0; i < max; i++, syms++) {
if (event_glob != NULL &&
!(strglobmatch(syms->symbol, event_glob) ||
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
@ -1366,7 +1367,7 @@ void print_events(const char *event_glob, bool name_only)
printf("\n");
printf(" %-50s [%s]\n",
"mem:<addr>[:access]",
"mem:<addr>[/len][:access]",
event_type_descriptors[PERF_TYPE_BREAKPOINT]);
printf("\n");
}

View File

@ -71,6 +71,7 @@ struct parse_events_term {
int type_val;
int type_term;
struct list_head list;
bool used;
};
struct parse_events_evlist {
@ -104,7 +105,7 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2);
int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type);
void *ptr, char *type, u64 len);
int parse_events_add_pmu(struct list_head *list, int *idx,
char *pmu , struct list_head *head_config);
enum perf_pmu_event_symbol_type


@ -159,6 +159,7 @@ branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
<mem>{
{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
: { return ':'; }
"/" { return '/'; }
{num_dec} { return value(yyscanner, 10); }
{num_hex} { return value(yyscanner, 16); }
/*


@ -326,6 +326,28 @@ PE_NAME_CACHE_TYPE
}
event_legacy_mem:
PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_evlist *data = _data;
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, $6, $4));
$$ = list;
}
|
PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
{
struct parse_events_evlist *data = _data;
struct list_head *list;
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, NULL, $4));
$$ = list;
}
|
PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_evlist *data = _data;
@ -333,7 +355,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, $4));
(void *) $2, $4, 0));
$$ = list;
}
|
@ -344,7 +366,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
ALLOC_LIST(list);
ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, NULL));
(void *) $2, NULL, 0));
$$ = list;
}


@ -46,7 +46,7 @@ static int get_value(struct parse_opt_ctx_t *p,
return opterror(opt, "is not usable", flags);
if (opt->flags & PARSE_OPT_EXCLUSIVE) {
if (p->excl_opt) {
if (p->excl_opt && p->excl_opt != opt) {
char msg[128];
if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||


@ -550,6 +550,35 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
}
}
/*
* Term is a string term, and might be a param-term. Try to look up its value
* in the remaining terms.
* - We have a term like "base-or-format-term=param-term",
* - We need to find the value supplied for "param-term" (with param-term named
* in a config string) later on in the term list.
*/
static int pmu_resolve_param_term(struct parse_events_term *term,
struct list_head *head_terms,
__u64 *value)
{
struct parse_events_term *t;
list_for_each_entry(t, head_terms, list) {
if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
if (!strcmp(t->config, term->config)) {
t->used = true;
*value = t->val.num;
return 0;
}
}
}
if (verbose)
printf("Required parameter '%s' not specified\n", term->config);
return -1;
}
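In other words, a string term whose value is the '?' placeholder is resolved from a numeric term of the same name supplied by the user, and the supplying term is marked used so it is skipped during normal evaluation. An illustrative event string for a sysfs alias containing 'core=?' (PMU and term names hypothetical):

    perf stat -e 'mypmu/some_event,core=2/' -a sleep 1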
/*
* Setup one of config[12] attr members based on the
* user input data - term parameter.
@ -557,25 +586,33 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
static int pmu_config_term(struct list_head *formats,
struct perf_event_attr *attr,
struct parse_events_term *term,
struct list_head *head_terms,
bool zero)
{
struct perf_pmu_format *format;
__u64 *vp;
__u64 val;
/*
* If this is a parameter we've already used for parameterized-eval,
* skip it in normal eval.
*/
if (term->used)
return 0;
/*
* Support only for hardcoded and numerical terms.
* Hardcoded terms should already be in, so nothing
* to be done for them.
*/
if (parse_events__is_hardcoded_term(term))
return 0;
if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
return -EINVAL;
format = pmu_find_format(formats, term->config);
if (!format)
if (!format) {
if (verbose)
printf("Invalid event/parameter '%s'\n", term->config);
return -EINVAL;
}
switch (format->value) {
case PERF_PMU_FORMAT_VALUE_CONFIG:
@ -592,11 +629,25 @@ static int pmu_config_term(struct list_head *formats,
}
/*
* XXX If we ever decide to go with string values for
* non-hardcoded terms, here's the place to translate
* them into value.
* Either directly use a numeric term, or try to translate string terms
* using event parameters.
*/
pmu_format_value(format->bits, term->val.num, vp, zero);
if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
val = term->val.num;
else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
if (strcmp(term->val.str, "?")) {
if (verbose)
pr_info("Invalid sysfs entry %s=%s\n",
term->config, term->val.str);
return -EINVAL;
}
if (pmu_resolve_param_term(term, head_terms, &val))
return -EINVAL;
} else
return -EINVAL;
pmu_format_value(format->bits, val, vp, zero);
return 0;
}
@ -607,9 +658,10 @@ int perf_pmu__config_terms(struct list_head *formats,
{
struct parse_events_term *term;
list_for_each_entry(term, head_terms, list)
if (pmu_config_term(formats, attr, term, zero))
list_for_each_entry(term, head_terms, list) {
if (pmu_config_term(formats, attr, term, head_terms, zero))
return -EINVAL;
}
return 0;
}
@ -767,10 +819,36 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
set_bit(b, bits);
}
static int sub_non_neg(int a, int b)
{
if (b > a)
return 0;
return a - b;
}
static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
struct perf_pmu_alias *alias)
{
snprintf(buf, len, "%s/%s/", pmu->name, alias->name);
struct parse_events_term *term;
int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
list_for_each_entry(term, &alias->terms, list) {
if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
used += snprintf(buf + used, sub_non_neg(len, used),
",%s=%s", term->config,
term->val.str);
}
if (sub_non_neg(len, used) > 0) {
buf[used] = '/';
used++;
}
if (sub_non_neg(len, used) > 0) {
buf[used] = '\0';
used++;
} else
buf[len - 1] = '\0';
return buf;
}
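Because format_alias() now appends the alias's string terms, parameterized events show up in listings with their placeholders intact, e.g. (hypothetical):

    mypmu/some_event,core=?/                          [Kernel PMU event]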


@ -768,7 +768,7 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
Py_DECREF(file);
goto free_list;
}
Py_DECREF(file);
}


@ -89,7 +89,7 @@ static void handler_call_die(const char *handler_name)
/*
* Insert val into the dictionary and decrement the reference counter.
* This is necessary for dictionaries since PyDict_SetItemString() does not
* steal a reference, as opposed to PyTuple_SetItem().
*/
static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)


@ -274,7 +274,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->id_index == NULL)
tool->id_index = process_id_index_stub;
}
static void swap_sample_id_all(union perf_event *event, void *data)
{
void *end = (void *) event + event->header.size;
@ -1251,9 +1251,9 @@ fetch_mmaped_event(struct perf_session *session,
#define NUM_MMAPS 128
#endif
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
u64 file_size, struct perf_tool *tool)
static int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size,
u64 file_size, struct perf_tool *tool)
{
int fd = perf_data_file__fd(session->file);
u64 head, page_offset, file_offset, file_pos, size;


@ -49,9 +49,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
union perf_event **event_ptr,
struct perf_sample *sample);
int __perf_session__process_events(struct perf_session *session,
u64 data_offset, u64 data_size, u64 size,
struct perf_tool *tool);
int perf_session__process_events(struct perf_session *session,
struct perf_tool *tool);


@ -1304,6 +1304,37 @@ static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}
static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_sort_entry *hse;
hse = container_of(fmt, struct hpp_sort_entry, hpp);
return hse->se->se_cmp(a, b);
}
static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_sort_entry *hse;
int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
hse = container_of(fmt, struct hpp_sort_entry, hpp);
collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
return collapse_fn(a, b);
}
static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
struct hist_entry *a, struct hist_entry *b)
{
struct hpp_sort_entry *hse;
int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
hse = container_of(fmt, struct hpp_sort_entry, hpp);
sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
return sort_fn(a, b);
}
static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd)
{
@@ -1322,9 +1353,9 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
hse->hpp.entry = __sort__hpp_entry;
hse->hpp.color = NULL;
hse->hpp.cmp = sd->entry->se_cmp;
hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
hse->hpp.cmp = __sort__hpp_cmp;
hse->hpp.collapse = __sort__hpp_collapse;
hse->hpp.sort = __sort__hpp_sort;
INIT_LIST_HEAD(&hse->hpp.list);
INIT_LIST_HEAD(&hse->hpp.sort_list);

tools/perf/util/symbol-elf.c View File

@@ -574,13 +574,16 @@ static int decompress_kmodule(struct dso *dso, const char *name,
const char *ext = strrchr(name, '.');
char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) ||
type != dso->symtab_type)
if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
return -1;
if (!ext || !is_supported_compression(ext + 1))
return -1;
if (!ext || !is_supported_compression(ext + 1)) {
ext = strrchr(dso->name, '.');
if (!ext || !is_supported_compression(ext + 1))
return -1;
}
fd = mkstemp(tmpbuf);
if (fd < 0)

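The fallback on dso->name is what makes the build-id cache case work: a cached copy is stored under a path derived from its build-id and keeps no compression suffix, while the dso's original name still ends in one. Hypothetical paths showing why both lookups are needed:

    /* build-id cache copy: no usable extension in the file name */
    name      = "~/.debug/.build-id/4f/212a76c51ba88...";
    /* original module path: ".gz" identifies the decompressor   */
    dso->name = "/lib/modules/3.19.0/kernel/fs/ext4/ext4.ko.gz";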
tools/perf/util/symbol.c View File

@@ -685,7 +685,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
struct machine *machine = kmaps->machine;
struct map *curr_map = map;
struct symbol *pos;
int count = 0, moved = 0;
struct rb_root *root = &dso->symbols[map->type];
struct rb_node *next = rb_first(root);
int kernel_range = 0;

tools/perf/util/unwind-libunwind.c View File

@@ -266,14 +266,17 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
u64 *fde_count)
{
int ret = -EINVAL, fd;
u64 offset;
u64 offset = dso->data.frame_offset;
if (offset == 0) {
fd = dso__data_fd(dso, machine);
if (fd < 0)
return -EINVAL;
/* Check the .eh_frame section for unwinding info */
offset = elf_section_offset(fd, ".eh_frame_hdr");
dso->data.frame_offset = offset;
}
if (offset)
ret = unwind_spec_ehframe(dso, machine, offset,
@@ -287,14 +290,20 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
static int read_unwind_spec_debug_frame(struct dso *dso,
struct machine *machine, u64 *offset)
{
int fd = dso__data_fd(dso, machine);
int fd;
u64 ofs = dso->data.frame_offset;
if (ofs == 0) {
fd = dso__data_fd(dso, machine);
if (fd < 0)
return -EINVAL;
/* Check the .debug_frame section for unwinding info */
*offset = elf_section_offset(fd, ".debug_frame");
ofs = elf_section_offset(fd, ".debug_frame");
dso->data.frame_offset = ofs;
}
*offset = ofs;
if (*offset)
return 0;
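Both readers now memoize the section offset in dso->data.frame_offset, so only the first unwind for a DSO pays for dso__data_fd() and the ELF section scan. A zero offset doubles as the "not yet looked up" marker, which is safe because file offset 0 holds the ELF header, never an unwind section; one consequence is that a failed lookup stores 0 and is simply retried on the next sample. The idiom in isolation (names as in the hunks above):

    u64 offset = dso->data.frame_offset;    /* 0 means "not cached yet" */

    if (offset == 0) {
            int fd = dso__data_fd(dso, machine);

            if (fd < 0)
                    return -EINVAL;

            offset = elf_section_offset(fd, ".eh_frame_hdr");
            dso->data.frame_offset = offset;        /* remember for next time */
    }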