alistair23-linux/tools/perf/perf.h
Ingo Molnar a448a0318a perf/urgent fixes:

Merge tag 'perf-urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes from Arnaldo Carvalho de Melo:

* The python binding needs to link with libtraceevent and to initialize
  the 'page_size' variable so that mmapping works again.

* The callchain folding character that appears on the TUI just before
  the overhead had disappeared due to recent changes; add it back.

* Intel PEBS in VT-x context uses the DS address as a guest linear address,
  even though it's programmed by the host as a host linear address. This either
  results in guest memory corruption and/or the hardware faulting and 'crashing'
  the virtual machine. Therefore we have to disable PEBS on VT-x enter and
  re-enable it on VT-x exit, enforcing a strict exclude_guest.

  Kernel side enforcement fix by Peter Zijlstra, tooling side fix by David Ahern.

* Fix build on sparc due to UAPI, fix from David Miller.

* Fixes for the srcline sort key for unresolved symbols and when processing
  samples in JITted code, where we don't have an ELF file, just a special
  symbol table, fixes from Namhyung Kim.

* Fix some leaks in libtraceevent, from Steven Rostedt.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2012-10-20 02:40:26 +02:00


#ifndef _PERF_PERF_H
#define _PERF_PERF_H
struct winsize;
void get_term_dimensions(struct winsize *ws);
#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif
#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif
#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb() asm volatile ("sync" ::: "memory")
#define cpu_relax() asm volatile ("" ::: "memory");
#define CPUINFO_PROC "cpu"
#endif
#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#endif
#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
#else
# define rmb() asm volatile("" ::: "memory")
#endif
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu type"
#endif
#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#define CPUINFO_PROC "cpu"
#endif
#ifdef __sparc__
#include "../../arch/sparc/include/uapi/asm/unistd.h"
#define rmb() asm volatile("":::"memory")
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif
#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb() asm volatile("mb" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif
#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif
#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb() ((void(*)(void))0xffff0fa0)()
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "Processor"
#endif
#ifdef __aarch64__
#include "../../arch/arm64/include/asm/unistd.h"
#define rmb() asm volatile("dmb ld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif
#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb()		asm volatile(				\
				".set	mips2\n\t"		\
				"sync\n\t"			\
				".set	mips0"			\
				: /* no output */		\
				: /* no input */		\
				: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include "../../include/uapi/linux/perf_event.h"
#include "util/types.h"
#include <stdbool.h>
struct perf_mmap {
	void		*base;
	int		mask;
	unsigned int	prev;
};

static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	int head = pc->data_head;
	rmb();
	return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md,
					 unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
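
/*
 * Example (sketch): draining newly written events from a mapped ring
 * buffer using the helpers above.  'page_size' is the runtime page size
 * and 'process_event' is a hypothetical callback; an event that wraps
 * around the end of the buffer is not handled in this sketch.
 */
static inline void perf_mmap__example_read(struct perf_mmap *md,
					   size_t page_size,
					   void (*process_event)(struct perf_event_header *ev))
{
	unsigned int head = perf_mmap__read_head(md);	/* rmb() done inside */
	unsigned int old = md->prev;
	unsigned char *data = (unsigned char *)md->base + page_size;

	while (old != head) {
		struct perf_event_header *ev =
			(struct perf_event_header *)&data[old & md->mask];

		process_event(ev);
		old += ev->size;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);	/* tell the kernel what we consumed */
}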
/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
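
/*
 * Example: timing an interval with rdclock(); the result is in
 * nanoseconds since both reads come from CLOCK_MONOTONIC.  'fn' is a
 * hypothetical workload callback.
 */
static inline unsigned long long rdclock_example_interval(void (*fn)(void))
{
	unsigned long long start = rdclock();

	fn();
	return rdclock() - start;
}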
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage
#define unlikely(x) __builtin_expect(!!(x), 0)
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
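
/*
 * Example (sketch): opening one hardware cycles counter for the calling
 * thread on any CPU.  exclude_guest is set explicitly, in line with the
 * stricter host/guest separation described in the merge message above.
 */
static inline int sys_perf_event_open_example(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.disabled	= 1,
		.exclude_guest	= 1,
	};

	/* pid == 0: this task, cpu == -1: any CPU, no group leader, no flags */
	return sys_perf_event_open(&attr, 0, -1, -1, 0);
}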
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 reserved:62;
};

struct branch_entry {
	u64			from;
	u64			to;
	struct branch_flags	flags;
};

struct branch_stack {
	u64			nr;
	struct branch_entry	entries[0];
};
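
/*
 * Example: walking the branch entries attached to a sample when
 * PERF_SAMPLE_BRANCH_STACK was requested, counting mispredicted ones.
 */
static inline u64 branch_stack__example_mispredicted(const struct branch_stack *bs)
{
	u64 i, mispredicted = 0;

	for (i = 0; i < bs->nr; i++)
		if (bs->entries[i].flags.mispred)
			mispredicted++;

	return mispredicted;
}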
extern bool perf_host, perf_guest;
extern const char perf_version_string[];
void pthread__unblock_sigwinch(void);
#include "util/target.h"
enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF
};
struct perf_record_opts {
	struct perf_target target;
	int	     call_graph;
	bool	     group;
	bool	     inherit_stat;
	bool	     no_delay;
	bool	     no_inherit;
	bool	     no_samples;
	bool	     pipe_output;
	bool	     raw_samples;
	bool	     sample_address;
	bool	     sample_time;
	bool	     sample_id_all_missing;
	bool	     exclude_guest_missing;
	bool	     period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64	     branch_stack;
	u64	     default_interval;
	u64	     user_interval;
	u16	     stack_dump_size;
};
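
/*
 * Example (sketch): filling in a perf_record_opts the way a 'perf record'
 * style tool might start out.  The specific values are illustrative, not
 * the tool's actual defaults.
 */
static inline struct perf_record_opts perf_record_opts__example(void)
{
	struct perf_record_opts opts = {
		.freq		= 4000,	/* sample at ~4 kHz */
		.mmap_pages	= 128,	/* ring buffer size in pages */
		.sample_time	= true,	/* record timestamps with samples */
	};

	return opts;
}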
#endif