Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

  - 'perf probe' improvements (Masami Hiramatsu):

    - Support glob wildcards for function names
    - Support the $params special probe argument: collect all function arguments
    - Make --line checks validate C-style function names
    - Add a --no-inlines option to avoid searching inline functions

  - Introduce a new 'perf bench futex' benchmark, 'wake-parallel', to
    measure parallel waker threads generating contention for kernel
    locks (hb->lock). (Davidlohr Bueso)

Bug fixes:

  - Make 'perf top' survive much longer on high core count machines;
    more work is needed to refcount more data structures besides
    'struct thread' and to fix more races. (Arnaldo Carvalho de Melo)

Infrastructure changes:

  - Move the barrier.h mb()/rmb()/wmb() API from tools/perf/ to a
    kernel-like tools/arch/ hierarchy. (Arnaldo Carvalho de Melo)

  - Borrow atomic.h from the kernel: initially the x86 implementation,
    with a fallback to gcc intrinsics for the other arches. All the
    kernel-like framework is in place for doing arch-specific
    implementations, preferably cloning what is in the kernel to the
    greatest extent possible. (Arnaldo Carvalho de Melo)

  - Protect the 'struct thread' lifetime with a reference counter, and
    protect the data structures that contain its instances with a mutex
    (Arnaldo Carvalho de Melo); the resulting get/put discipline is
    sketched below.

  - Disable libdw DWARF unwind when built with NO_DWARF (Naveen N. Rao)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
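
In sketch form (simplified; the real implementation lives in tools/perf/util/thread.[ch] and machine.c), the reference-counting discipline that the per-tool diffs below follow looks roughly like this, with the thread__get()/thread__put() names matching the diffs:

struct thread {
	atomic_t refcnt;
	/* ... pid, tid, comm and the rest of the real struct ... */
};

static inline struct thread *thread__get(struct thread *thread)
{
	if (thread)
		atomic_inc(&thread->refcnt);
	return thread;
}

static inline void thread__put(struct thread *thread)
{
	if (thread && atomic_dec_and_test(&thread->refcnt))
		thread__delete(thread);	/* last reference dropped */
}

Every machine__findnew_thread() / perf_session__findnew() caller now owns a reference and must drop it with thread__put(), which is exactly what the builtin-* diffs below add on every exit path.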
Merged by Ingo Molnar on 2015-05-09 08:10:36 +02:00 as commit 32b0ed3ae6; 71 changed files with 1392 additions and 323 deletions. The per-file diffs follow.

tools/arch/alpha/include/asm/barrier.h:

@ -0,0 +1,8 @@
#ifndef __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
#define __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
#define mb() __asm__ __volatile__("mb": : :"memory")
#define rmb() __asm__ __volatile__("mb": : :"memory")
#define wmb() __asm__ __volatile__("wmb": : :"memory")
#endif /* __TOOLS_LINUX_ASM_ALPHA_BARRIER_H */

tools/arch/arm/include/asm/barrier.h:

@ -0,0 +1,12 @@
#ifndef _TOOLS_LINUX_ASM_ARM_BARRIER_H
#define _TOOLS_LINUX_ASM_ARM_BARRIER_H
/*
* Use the __kuser_memory_barrier helper in the CPU helper page. See
* arch/arm/kernel/entry-armv.S in the kernel source for details.
*/
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#endif /* _TOOLS_LINUX_ASM_ARM_BARRIER_H */

tools/arch/arm64/include/asm/barrier.h:

@ -0,0 +1,16 @@
#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
/*
* From tools/perf/perf-sys.h, last modified in:
* f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
*
* XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
* a case like for arm32 where we do things differently in userspace?
*/
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */

tools/arch/ia64/include/asm/barrier.h:

@ -0,0 +1,48 @@
/*
* Copied from the kernel sources to tools/:
*
* Memory barrier definitions. This is based on information published
* in the Processor Abstraction Layer and the System Abstraction Layer
* manual.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
*/
#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
#define _TOOLS_LINUX_ASM_IA64_BARRIER_H
#include <linux/compiler.h>
/*
* Macros to force memory ordering. In these descriptions, "previous"
* and "subsequent" refer to program order; "visible" means that all
* architecturally visible effects of a memory access have occurred
* (at a minimum, this means the memory has been read or written).
*
* wmb(): Guarantees that all preceding stores to memory-
* like regions are visible before any subsequent
* stores and that all following stores will be
* visible only after all previous stores.
* rmb(): Like wmb(), but for reads.
* mb(): wmb()/rmb() combo, i.e., all previous memory
* accesses are visible before all subsequent
* accesses and vice versa. This is also known as
* a "fence."
*
* Note: "mb()" and its variants cannot be used as a fence to order
* accesses to memory mapped I/O registers. For that, mf.a needs to
* be used. However, we don't want to always use mf.a because (a)
* it's (presumably) much slower than mf and (b) mf.a is supported for
* sequential memory pages only.
*/
/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
#define ia64_mf() asm volatile ("mf" ::: "memory")
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
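
The mb()/rmb()/wmb() contract described in the comment above is arch-independent; a minimal producer/consumer sketch of it (illustrative only, not part of this commit; ACCESS_ONCE() comes from tools/include/linux/compiler.h):

#include <asm/barrier.h>
#include <linux/compiler.h>

static int data, ready;

static void publish(int v)
{
	data = v;
	wmb();	/* the store to data must be visible before the flag */
	ACCESS_ONCE(ready) = 1;
}

static int consume(void)
{
	while (!ACCESS_ONCE(ready))
		;	/* spin until the producer raises the flag */
	rmb();	/* read the flag before reading the data it guards */
	return data;
}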

tools/arch/mips/include/asm/barrier.h:

@ -0,0 +1,20 @@
#ifndef _TOOLS_LINUX_ASM_MIPS_BARRIER_H
#define _TOOLS_LINUX_ASM_MIPS_BARRIER_H
/*
* FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
* in c1e028ef40b8d6943b767028ba17d4f2ba020edb, more work needed to make it
* more closely follow the Linux kernel arch/mips/include/asm/barrier.h file.
* Probably when we continue work on tools/ Kconfig support to have all the
* CONFIG_ needed for properly doing that.
*/
#define mb() asm volatile( \
".set mips2\n\t" \
"sync\n\t" \
".set mips0" \
: /* no output */ \
: /* no input */ \
: "memory")
#define wmb() mb()
#define rmb() mb()
#endif /* _TOOLS_LINUX_ASM_MIPS_BARRIER_H */

tools/arch/powerpc/include/asm/barrier.h:

@ -0,0 +1,29 @@
/*
* Copied from the kernel sources:
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*/
#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
* by this processor have been performed (with respect to all other
* mechanisms that access memory). The eieio instruction is a barrier
* providing an ordering (separately) for (a) cacheable stores and (b)
* loads and stores to non-cacheable memory (e.g. I/O devices).
*
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
*
* *mb() variants without smp_ prefix must order all types of memory
* operations with one another. sync is the only instruction sufficient
* to do this.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */

tools/arch/s390/include/asm/barrier.h:

@ -0,0 +1,30 @@
/*
* Copied from the kernel sources:
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __TOOLS_LINUX_ASM_BARRIER_H
#define __TOOLS_LINUX_ASM_BARRIER_H
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
/* Fast-BCR without checkpoint synchronization */
#define __ASM_BARRIER "bcr 14,0\n"
#else
#define __ASM_BARRIER "bcr 15,0\n"
#endif
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
#define rmb() mb()
#define wmb() mb()
#endif /* __TOOLS_LINUX_ASM_BARRIER_H */

tools/arch/sh/include/asm/barrier.h:

@ -0,0 +1,32 @@
/*
* Copied from the kernel sources:
*
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Paul Mundt
*/
#ifndef __TOOLS_LINUX_ASM_SH_BARRIER_H
#define __TOOLS_LINUX_ASM_SH_BARRIER_H
/*
* A brief note on ctrl_barrier(), the control register write barrier.
*
* Legacy SH cores typically require a sequence of 8 nops after
* modification of a control register in order for the changes to take
* effect. On newer cores (like the sh4a and sh5) this is accomplished
* with icbi.
*
* Also note that on sh4a in the icbi case we can forego a synco for the
* write barrier, as it's not necessary for control registers.
*
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(__SH4A__) || defined(__SH5__)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
#endif
#include <asm-generic/barrier.h>
#endif /* __TOOLS_LINUX_ASM_SH_BARRIER_H */

tools/arch/sparc/include/asm/barrier.h:

@ -0,0 +1,8 @@
#ifndef ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
#define ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
#if defined(__sparc__) && defined(__arch64__)
#include "barrier_64.h"
#else
#include "barrier_32.h"
#endif
#endif

tools/arch/sparc/include/asm/barrier_32.h:

@ -0,0 +1,6 @@
#ifndef __TOOLS_PERF_SPARC_BARRIER_H
#define __TOOLS_PERF_SPARC_BARRIER_H
#include <asm-generic/barrier.h>
#endif /* !(__TOOLS_PERF_SPARC_BARRIER_H) */

tools/arch/sparc/include/asm/barrier_64.h:

@ -0,0 +1,42 @@
#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
#define __TOOLS_LINUX_SPARC64_BARRIER_H
/* Copied from the kernel sources to tools/:
*
* These are here in an effort to more fully work around Spitfire Errata
* #51. Essentially, if a memory barrier occurs soon after a mispredicted
* branch, the chip can stop executing instructions until a trap occurs.
* Therefore, if interrupts are disabled, the chip can hang forever.
*
* It used to be believed that the memory barrier had to be right in the
* delay slot, but a case has been traced recently wherein the memory barrier
* was one instruction after the branch delay slot and the chip still hung.
* The offending sequence was the following in sym_wakeup_done() of the
* sym53c8xx_2 driver:
*
* call sym_ccb_from_dsa, 0
* movge %icc, 0, %l0
* brz,pn %o0, .LL1303
* mov %o0, %l2
* membar #LoadLoad
*
* The branch has to be mispredicted for the bug to occur. Therefore, we put
* the memory barrier explicitly into a "branch always, predicted taken"
* delay slot to avoid the problem case.
*/
#define membar_safe(type) \
do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
" membar " type "\n" \
"1:\n" \
: : : "memory"); \
} while (0)
/* The kernel always executes in TSO memory model these days,
* and furthermore most sparc64 chips implement more stringent
* memory ordering than required by the specifications.
*/
#define mb() membar_safe("#StoreLoad")
#define rmb() __asm__ __volatile__("":::"memory")
#define wmb() __asm__ __volatile__("":::"memory")
#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */

tools/arch/tile/include/asm/barrier.h:

@ -0,0 +1,15 @@
#ifndef _TOOLS_LINUX_ASM_TILE_BARRIER_H
#define _TOOLS_LINUX_ASM_TILE_BARRIER_H
/*
* FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
* in 620830b6954913647b7c7f68920cf48eddf6ad92, more work needed to make it
* more closely follow the Linux kernel arch/tile/include/asm/barrier.h file.
* Probably when we continue work on tools/ Kconfig support to have all the
* CONFIG_ needed for properly doing that.
*/
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() mb()
#define rmb() mb()
#endif /* _TOOLS_LINUX_ASM_TILE_BARRIER_H */

tools/arch/x86/include/asm/atomic.h:

@ -0,0 +1,65 @@
#ifndef _TOOLS_LINUX_ASM_X86_ATOMIC_H
#define _TOOLS_LINUX_ASM_X86_ATOMIC_H
#include <linux/compiler.h>
#include <linux/types.h>
#include "rmwcc.h"
#define LOCK_PREFIX "\n\tlock; "
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
#endif /* _TOOLS_LINUX_ASM_X86_ATOMIC_H */

tools/arch/x86/include/asm/barrier.h:

@ -0,0 +1,28 @@
#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
#define _TOOLS_LINUX_ASM_X86_BARRIER_H
/*
* Copied from the Linux kernel sources, and also moving code
* out from tools/perf/perf-sys.h so as to make it be located
* in a place similar as in the kernel sources.
*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#if defined(__i386__)
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */

tools/arch/x86/include/asm/rmwcc.h:

@ -0,0 +1,41 @@
#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
#define _TOOLS_LINUX_ASM_X86_RMWcc
#ifdef CC_HAVE_ASM_GOTO
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
: : "m" (var), ## __VA_ARGS__ \
: "memory" : cc_label); \
return 0; \
cc_label: \
return 1; \
} while (0)
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
__GEN_RMWcc(op " " arg0, var, cc)
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
#else /* !CC_HAVE_ASM_GOTO */
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
char c; \
asm volatile (fullop "; set" cc " %1" \
: "+m" (var), "=qm" (c) \
: __VA_ARGS__ : "memory"); \
return c != 0; \
} while (0)
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
__GEN_RMWcc(op " " arg0, var, cc)
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
#endif /* CC_HAVE_ASM_GOTO */
#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
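
Hand-expanding atomic_dec_and_test() from the x86 atomic.h above through the !CC_HAVE_ASM_GOTO branch gives roughly the following (a sketch of what the macro generates, not code from this commit):

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;

	asm volatile("\n\tlock; decl %0; sete %1"
		     : "+m" (v->counter), "=qm" (c)
		     : : "memory");
	return c != 0;
}

With CC_HAVE_ASM_GOTO the condition is instead consumed by branching directly on the flags (je to the C label), so the sete and the flag byte disappear.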

tools/arch/xtensa/include/asm/barrier.h:

@ -0,0 +1,18 @@
/*
* Copied from the kernel sources to tools/:
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2012 Tensilica Inc.
*/
#ifndef _TOOLS_LINUX_XTENSA_SYSTEM_H
#define _TOOLS_LINUX_XTENSA_SYSTEM_H
#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier()
#define wmb() mb()
#endif /* _TOOLS_LINUX_XTENSA_SYSTEM_H */

tools/include/asm-generic/atomic-gcc.h:

@ -0,0 +1,63 @@
#ifndef __TOOLS_ASM_GENERIC_ATOMIC_H
#define __TOOLS_ASM_GENERIC_ATOMIC_H
#include <linux/compiler.h>
#include <linux/types.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* Excerpts obtained from the Linux kernel sources.
*/
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static inline int atomic_read(const atomic_t *v)
{
return ACCESS_ONCE((v)->counter);
}
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic_set(atomic_t *v, int i)
{
v->counter = i;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
__sync_add_and_fetch(&v->counter, 1);
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
return __sync_sub_and_fetch(&v->counter, 1) == 0;
}
#endif /* __TOOLS_ASM_GENERIC_ATOMIC_H */

tools/include/asm-generic/barrier.h:

@ -0,0 +1,44 @@
/*
* Copied from the kernel sources to tools/perf/:
*
* Generic barrier definitions, originally based on MN10300 definitions.
*
* It should be possible to use these on really simple architectures,
* but it serves more as a starting point for new ports.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
#define __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
* Fall back to compiler barriers if nothing better is provided.
*/
#ifndef mb
#define mb() barrier()
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() mb()
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __TOOLS_LINUX_ASM_GENERIC_BARRIER_H */

tools/include/asm/atomic.h:

@ -0,0 +1,10 @@
#ifndef __TOOLS_LINUX_ASM_ATOMIC_H
#define __TOOLS_LINUX_ASM_ATOMIC_H
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/atomic.h"
#else
#include <asm-generic/atomic-gcc.h>
#endif
#endif /* __TOOLS_LINUX_ASM_ATOMIC_H */

tools/include/asm/barrier.h:

@ -0,0 +1,27 @@
#if defined(__i386__) || defined(__x86_64__)
#include "../../arch/x86/include/asm/barrier.h"
#elif defined(__arm__)
#include "../../arch/arm/include/asm/barrier.h"
#elif defined(__aarch64__)
#include "../../arch/arm64/include/asm/barrier.h"
#elif defined(__powerpc__)
#include "../../arch/powerpc/include/asm/barrier.h"
#elif defined(__s390__)
#include "../../arch/s390/include/asm/barrier.h"
#elif defined(__sh__)
#include "../../arch/sh/include/asm/barrier.h"
#elif defined(__sparc__)
#include "../../arch/sparc/include/asm/barrier.h"
#elif defined(__tile__)
#include "../../arch/tile/include/asm/barrier.h"
#elif defined(__alpha__)
#include "../../arch/alpha/include/asm/barrier.h"
#elif defined(__mips__)
#include "../../arch/mips/include/asm/barrier.h"
#elif defined(__ia64__)
#include "../../arch/ia64/include/asm/barrier.h"
#elif defined(__xtensa__)
#include "../../arch/xtensa/include/asm/barrier.h"
#else
#include <asm-generic/barrier.h>
#endif
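
The main in-tree consumer of these barriers is the perf mmap ring buffer. A sketch of the canonical head read (essentially what tools/perf/util/evlist.h does; struct perf_event_mmap_page is the UAPI type):

#include <linux/types.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

static u64 mmap_read_head(struct perf_event_mmap_page *pc)
{
	u64 head = ACCESS_ONCE(pc->data_head);

	rmb();	/* order the head load before loading the records it covers */
	return head;
}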

tools/include/linux/atomic.h:

@ -0,0 +1,6 @@
#ifndef __TOOLS_LINUX_ATOMIC_H
#define __TOOLS_LINUX_ATOMIC_H
#include <asm/atomic.h>
#endif /* __TOOLS_LINUX_ATOMIC_H */

tools/include/linux/compiler.h:

@ -1,6 +1,10 @@
#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif

tools/include/linux/types.h:

@ -60,6 +60,10 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
typedef struct {
int counter;
} atomic_t;
struct list_head {
struct list_head *next, *prev;
};

tools/perf/Documentation/perf-bench.txt:

@ -210,6 +210,9 @@ Suite for evaluating hash tables.
*wake*::
Suite for evaluating wake calls.
*wake-parallel*::
Suite for evaluating parallel wake calls.
*requeue*::
Suite for evaluating requeue calls.
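
An invocation looks like this (illustrative; --threads/-t and --nwakers/-w are the options the new benchmark defines, see futex-wake-parallel.c below):

  $ perf bench futex wake-parallel --threads 512 --nwakers 8

This blocks 512 threads on a single futex and wakes them with 8 parallel waker threads, each waking 512/8 = 64 blocked threads.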

tools/perf/Documentation/perf-probe.txt:

@ -83,6 +83,10 @@ OPTIONS
(Only for --vars) Show external defined variables in addition to local
variables.
--no-inlines::
(Only for --add) Search only for non-inlined functions. The functions
which do not have instances are ignored.
-F::
--funcs[=FILTER]::
Show available functions in given module or kernel. With -x/--exec,
@ -151,7 +155,7 @@ Each probe argument follows below syntax.
[NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
'$vars' special argument is also available for NAME, it is expanded to the local variables which can access at given probe point.
'$vars' and '$params' special arguments are also available for NAME. '$vars' is expanded to the local variables (including function parameters) which can be accessed at the given probe point. '$params' is expanded to only the function parameters.
'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
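
For example (illustrative command lines; vfs_read stands in for any probe-able kernel function):

  $ perf probe --add 'vfs_read $params'	# collect every function parameter
  $ perf probe --add 'tcp_rcv_*'	# glob wildcard over function names

The first line uses the new $params special argument, the second the new glob support; both correspond to the 'perf probe' items in the changelog above.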

tools/perf/MANIFEST:

@ -1,12 +1,30 @@
tools/perf
tools/arch/alpha/include/asm/barrier.h
tools/arch/arm/include/asm/barrier.h
tools/arch/ia64/include/asm/barrier.h
tools/arch/mips/include/asm/barrier.h
tools/arch/powerpc/include/asm/barrier.h
tools/arch/s390/include/asm/barrier.h
tools/arch/sh/include/asm/barrier.h
tools/arch/sparc/include/asm/barrier.h
tools/arch/sparc/include/asm/barrier_32.h
tools/arch/sparc/include/asm/barrier_64.h
tools/arch/tile/include/asm/barrier.h
tools/arch/x86/include/asm/barrier.h
tools/arch/xtensa/include/asm/barrier.h
tools/scripts
tools/build
tools/arch/x86/include/asm/atomic.h
tools/arch/x86/include/asm/rmwcc.h
tools/lib/traceevent
tools/lib/api
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
tools/include/asm/atomic.h
tools/include/asm/barrier.h
tools/include/asm/bug.h
tools/include/asm-generic/barrier.h
tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
tools/include/asm-generic/bitops/const_hweight.h
@ -17,6 +35,7 @@ tools/include/asm-generic/bitops/fls64.h
tools/include/asm-generic/bitops/fls.h
tools/include/asm-generic/bitops/hweight.h
tools/include/asm-generic/bitops.h
tools/include/linux/atomic.h
tools/include/linux/bitops.h
tools/include/linux/compiler.h
tools/include/linux/export.h

tools/perf/bench/Build:

@ -3,6 +3,7 @@ perf-y += sched-pipe.o
perf-y += mem-memcpy.o
perf-y += futex-hash.o
perf-y += futex-wake.o
perf-y += futex-wake-parallel.o
perf-y += futex-requeue.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o

tools/perf/bench/bench.h:

@ -33,6 +33,8 @@ extern int bench_mem_memcpy(int argc, const char **argv,
extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
extern int bench_futex_hash(int argc, const char **argv, const char *prefix);
extern int bench_futex_wake(int argc, const char **argv, const char *prefix);
extern int bench_futex_wake_parallel(int argc, const char **argv,
const char *prefix);
extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
#define BENCH_FORMAT_DEFAULT_STR "default"

tools/perf/bench/futex-wake-parallel.c:

@ -0,0 +1,294 @@
/*
* Copyright (C) 2015 Davidlohr Bueso.
*
* Block a bunch of threads and let parallel waker threads wakeup an
* equal amount of them. The program output reflects the avg latency
* for each individual thread to service its share of work. Ultimately
* it can be used to measure futex_wake() changes.
*/
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
#include "../util/parse-options.h"
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>
struct thread_data {
pthread_t worker;
unsigned int nwoken;
struct timeval runtime;
};
static unsigned int nwakes = 1;
/* all threads will block on the same futex -- hash bucket chaos ;) */
static u_int32_t futex = 0;
static pthread_t *blocked_worker;
static bool done = false, silent = false, fshared = false;
static unsigned int nblocked_threads = 0, nwaking_threads = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats waketime_stats, wakeup_stats;
static unsigned int ncpus, threads_starting;
static int futex_flag = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nblocked_threads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakers", &nwaking_threads, "Specify amount of waking threads"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
OPT_END()
};
static const char * const bench_futex_wake_parallel_usage[] = {
"perf bench futex wake-parallel <options>",
NULL
};
static void *waking_workerfn(void *arg)
{
struct thread_data *waker = (struct thread_data *) arg;
struct timeval start, end;
gettimeofday(&start, NULL);
waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
if (waker->nwoken != nwakes)
warnx("couldn't wakeup all tasks (%d/%d)",
waker->nwoken, nwakes);
gettimeofday(&end, NULL);
timersub(&end, &start, &waker->runtime);
pthread_exit(NULL);
return NULL;
}
static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
{
unsigned int i;
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
/* create and block all threads */
for (i = 0; i < nwaking_threads; i++) {
/*
* Thread creation order will impact per-thread latency
* as it will affect the order to acquire the hb spinlock.
* For now let the scheduler decide.
*/
if (pthread_create(&td[i].worker, &thread_attr,
waking_workerfn, (void *)&td[i]))
err(EXIT_FAILURE, "pthread_create");
}
for (i = 0; i < nwaking_threads; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
}
static void *blocked_workerfn(void *arg __maybe_unused)
{
pthread_mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
pthread_cond_signal(&thread_parent);
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
while (1) { /* handle spurious wakeups */
if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
break;
}
pthread_exit(NULL);
return NULL;
}
static void block_threads(pthread_t *w, pthread_attr_t thread_attr)
{
cpu_set_t cpu;
unsigned int i;
threads_starting = nblocked_threads;
/* create and block all threads */
for (i = 0; i < nblocked_threads; i++) {
CPU_ZERO(&cpu);
CPU_SET(i % ncpus, &cpu);
if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
err(EXIT_FAILURE, "pthread_create");
}
}
static void print_run(struct thread_data *waking_worker, unsigned int run_num)
{
unsigned int i, wakeup_avg;
double waketime_avg, waketime_stddev;
struct stats __waketime_stats, __wakeup_stats;
init_stats(&__wakeup_stats);
init_stats(&__waketime_stats);
for (i = 0; i < nwaking_threads; i++) {
update_stats(&__waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&__wakeup_stats, waking_worker[i].nwoken);
}
waketime_avg = avg_stats(&__waketime_stats);
waketime_stddev = stddev_stats(&__waketime_stats);
wakeup_avg = avg_stats(&__wakeup_stats);
printf("[Run %d]: Avg per-thread latency (waking %d/%d threads) "
"in %.4f ms (+-%.2f%%)\n", run_num + 1, wakeup_avg,
nblocked_threads, waketime_avg/1e3,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void print_summary(void)
{
unsigned int wakeup_avg;
double waketime_avg, waketime_stddev;
waketime_avg = avg_stats(&waketime_stats);
waketime_stddev = stddev_stats(&waketime_stats);
wakeup_avg = avg_stats(&wakeup_stats);
printf("Avg per-thread latency (waking %d/%d threads) in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
nblocked_threads,
waketime_avg/1e3,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void do_run_stats(struct thread_data *waking_worker)
{
unsigned int i;
for (i = 0; i < nwaking_threads; i++) {
update_stats(&waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&wakeup_stats, waking_worker[i].nwoken);
}
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_wake_parallel(int argc, const char **argv,
const char *prefix __maybe_unused)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
struct thread_data *waking_worker;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
if (argc) {
usage_with_options(bench_futex_wake_parallel_usage, options);
exit(EXIT_FAILURE);
}
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (!nblocked_threads)
nblocked_threads = ncpus;
/* some sanity checks */
if (nwaking_threads > nblocked_threads || !nwaking_threads)
nwaking_threads = nblocked_threads;
if (nblocked_threads % nwaking_threads)
errx(EXIT_FAILURE, "Must be perfectly divisible");
/*
* Each thread will wakeup nwakes tasks in
* a single futex_wait call.
*/
nwakes = nblocked_threads/nwaking_threads;
blocked_worker = calloc(nblocked_threads, sizeof(*blocked_worker));
if (!blocked_worker)
err(EXIT_FAILURE, "calloc");
if (!fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
"futex %p), %d threads waking up %d at a time.\n\n",
getpid(), nblocked_threads, fshared ? "shared":"private",
&futex, nwaking_threads, nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
pthread_attr_init(&thread_attr);
pthread_mutex_init(&thread_lock, NULL);
pthread_cond_init(&thread_parent, NULL);
pthread_cond_init(&thread_worker, NULL);
for (j = 0; j < bench_repeat && !done; j++) {
waking_worker = calloc(nwaking_threads, sizeof(*waking_worker));
if (!waking_worker)
err(EXIT_FAILURE, "calloc");
/* create, launch & block all threads */
block_threads(blocked_worker, thread_attr);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
while (threads_starting)
pthread_cond_wait(&thread_parent, &thread_lock);
pthread_cond_broadcast(&thread_worker);
pthread_mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
wakeup_threads(waking_worker, thread_attr);
for (i = 0; i < nblocked_threads; i++) {
ret = pthread_join(blocked_worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
do_run_stats(waking_worker);
if (!silent)
print_run(waking_worker, j);
free(waking_worker);
}
/* cleanup & report results */
pthread_cond_destroy(&thread_parent);
pthread_cond_destroy(&thread_worker);
pthread_mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(blocked_worker);
return ret;
}

tools/perf/bench/futex-wake.c:

@ -60,7 +60,12 @@ static void *workerfn(void *arg __maybe_unused)
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
futex_wait(&futex1, 0, NULL, futex_flag);
while (1) {
if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
break;
}
pthread_exit(NULL);
return NULL;
}

tools/perf/builtin-annotate.c:

@ -84,6 +84,7 @@ static int process_sample_event(struct perf_tool *tool,
{
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
int ret = 0;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
@ -92,15 +93,16 @@ static int process_sample_event(struct perf_tool *tool,
}
if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
return 0;
goto out_put;
if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
return -1;
ret = -1;
}
return 0;
out_put:
addr_location__put(&al);
return ret;
}
static int hist_entry__tty_annotate(struct hist_entry *he,

tools/perf/builtin-bench.c:

@ -58,6 +58,7 @@ static struct bench mem_benchmarks[] = {
static struct bench futex_benchmarks[] = {
{ "hash", "Benchmark for futex hash table", bench_futex_hash },
{ "wake", "Benchmark for futex wake calls", bench_futex_wake },
{ "wake-parallel", "Benchmark for parallel futex wake calls", bench_futex_wake_parallel },
{ "requeue", "Benchmark for futex requeue calls", bench_futex_requeue },
{ "all", "Test all futex benchmarks", NULL },
{ NULL, NULL, NULL }

tools/perf/builtin-diff.c:

@ -328,6 +328,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
{
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
int ret = -1;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
@ -338,7 +339,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
if (hists__add_entry(hists, &al, sample->period,
sample->weight, sample->transaction)) {
pr_warning("problem incrementing symbol period, skipping event\n");
return -1;
goto out_put;
}
/*
@ -350,8 +351,10 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
hists->stats.total_period += sample->period;
if (!al.filtered)
hists->stats.total_non_filtered_period += sample->period;
return 0;
ret = 0;
out_put:
addr_location__put(&al);
return ret;
}
static struct perf_tool tool = {

tools/perf/builtin-inject.c:

@ -365,6 +365,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
}
}
thread__put(thread);
repipe:
perf_event__repipe(tool, event, sample, machine);
return 0;

tools/perf/builtin-kmem.c:

@ -906,6 +906,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
@ -919,10 +920,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
return f(evsel, sample);
err = f(evsel, sample);
}
return 0;
thread__put(thread);
return err;
}
static struct perf_tool perf_kmem = {

tools/perf/builtin-kvm.c:

@ -651,6 +651,7 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread;
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
tool);
@ -666,9 +667,10 @@ static int process_sample_event(struct perf_tool *tool,
}
if (!handle_kvm_event(kvm, thread, evsel, sample))
return -1;
err = -1;
return 0;
thread__put(thread);
return err;
}
static int cpu_isa_config(struct perf_kvm_stat *kvm)

tools/perf/builtin-lock.c:

@ -769,6 +769,7 @@ static void dump_threads(void)
t = perf_session__findnew(session, st->tid);
pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
};
}
@ -810,6 +811,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine)
{
int err = 0;
struct thread *thread = machine__findnew_thread(machine, sample->pid,
sample->tid);
@ -821,10 +823,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (evsel->handler != NULL) {
tracepoint_handler f = evsel->handler;
return f(evsel, sample);
err = f(evsel, sample);
}
return 0;
thread__put(thread);
return err;
}
static void sort_result(void)

tools/perf/builtin-mem.c:

@ -74,7 +74,7 @@ dump_raw_samples(struct perf_tool *tool,
}
if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
return 0;
goto out_put;
if (al.map != NULL)
al.map->dso->hit = 1;
@ -103,7 +103,8 @@ dump_raw_samples(struct perf_tool *tool,
symbol_conf.field_sep,
al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
al.sym ? al.sym->name : "???");
out_put:
addr_location__put(&al);
return 0;
}

tools/perf/builtin-probe.c:

@ -50,8 +50,6 @@
static struct {
int command; /* Command short_name */
bool list_events;
bool force_add;
bool show_ext_vars;
bool uprobes;
bool quiet;
bool target_used;
@ -59,7 +57,6 @@ static struct {
struct perf_probe_event events[MAX_PROBES];
struct line_range line_range;
char *target;
int max_probe_points;
struct strfilter *filter;
} params;
@ -364,7 +361,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
"\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
#endif
opt_add_probe_event),
OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
OPT_BOOLEAN('f', "force", &probe_conf.force_add, "forcibly add events"
" with existing name"),
#ifdef HAVE_DWARF_SUPPORT
OPT_CALLBACK('L', "line", NULL,
@ -373,7 +370,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('V', "vars", NULL,
"FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
"Show accessible variables on PROBEDEF", opt_show_vars),
OPT_BOOLEAN('\0', "externs", &params.show_ext_vars,
OPT_BOOLEAN('\0', "externs", &probe_conf.show_ext_vars,
"Show external variables too (with --vars only)"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
@ -382,9 +379,11 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK('m', "module", NULL, "modname|path",
"target module name (for online) or path (for offline)",
opt_set_target),
OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
"Don't search inlined functions"),
#endif
OPT__DRY_RUN(&probe_event_dry_run),
OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
OPT_INTEGER('\0', "max-probes", &probe_conf.max_probes,
"Set how many probe points can be found for a probe."),
OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
"Show potential probe-able functions.",
@ -440,8 +439,8 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
verbose = -1;
}
if (params.max_probe_points == 0)
params.max_probe_points = MAX_PROBES;
if (probe_conf.max_probes == 0)
probe_conf.max_probes = MAX_PROBES;
/*
* Only consider the user's kernel image path if given.
@ -477,10 +476,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
NULL);
ret = show_available_vars(params.events, params.nevents,
params.max_probe_points,
params.target,
params.filter,
params.show_ext_vars);
params.filter);
if (ret < 0)
pr_err_with_code(" Error: Failed to show vars.", ret);
return ret;
@ -499,9 +495,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(probe_usage, options);
}
ret = add_perf_probe_events(params.events, params.nevents,
params.max_probe_points,
params.force_add);
ret = add_perf_probe_events(params.events, params.nevents);
if (ret < 0) {
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
@ -523,5 +517,5 @@ int cmd_probe(int argc, const char **argv, const char *prefix)
cleanup_params();
}
return ret;
return ret < 0 ? ret : 0;
}

tools/perf/builtin-report.c:

@ -142,7 +142,7 @@ static int process_sample_event(struct perf_tool *tool,
.hide_unresolved = rep->hide_unresolved,
.add_entry_cb = hist_iter__report_callback,
};
int ret;
int ret = 0;
if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
@ -151,10 +151,10 @@ static int process_sample_event(struct perf_tool *tool,
}
if (rep->hide_unresolved && al.sym == NULL)
return 0;
goto out_put;
if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
return 0;
goto out_put;
if (sort__mode == SORT_MODE__BRANCH)
iter.ops = &hist_iter_branch;
@ -172,7 +172,8 @@ static int process_sample_event(struct perf_tool *tool,
rep);
if (ret < 0)
pr_debug("problem adding hist entry, skipping event\n");
out_put:
addr_location__put(&al);
return ret;
}

tools/perf/builtin-sched.c:

@ -770,7 +770,7 @@ static int replay_fork_event(struct perf_sched *sched,
if (child == NULL || parent == NULL) {
pr_debug("thread does not exist on fork event: child %p, parent %p\n",
child, parent);
return 0;
goto out_put;
}
if (verbose) {
@ -781,6 +781,9 @@ static int replay_fork_event(struct perf_sched *sched,
register_pid(sched, parent->tid, thread__comm_str(parent));
register_pid(sched, child->tid, thread__comm_str(child));
out_put:
thread__put(child);
thread__put(parent);
return 0;
}
@ -957,7 +960,7 @@ static int latency_switch_event(struct perf_sched *sched,
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
int cpu = sample->cpu, err = -1;
s64 delta;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@ -976,15 +979,17 @@ static int latency_switch_event(struct perf_sched *sched,
sched_out = machine__findnew_thread(machine, -1, prev_pid);
sched_in = machine__findnew_thread(machine, -1, next_pid);
if (sched_out == NULL || sched_in == NULL)
goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
if (thread_atoms_insert(sched, sched_out))
return -1;
goto out_put;
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
pr_err("out-event: Internal tree error");
return -1;
goto out_put;
}
}
if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
@ -993,22 +998,25 @@ static int latency_switch_event(struct perf_sched *sched,
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
if (thread_atoms_insert(sched, sched_in))
return -1;
goto out_put;
in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
if (!in_events) {
pr_err("in-event: Internal tree error");
return -1;
goto out_put;
}
/*
* Task came in that we have not heard about yet,
* add in an initial atom in runnable state:
*/
if (add_sched_out_event(in_events, 'R', timestamp))
return -1;
goto out_put;
}
add_sched_in_event(in_events, timestamp);
return 0;
err = 0;
out_put:
thread__put(sched_out);
thread__put(sched_in);
return err;
}
static int latency_runtime_event(struct perf_sched *sched,
@ -1021,23 +1029,29 @@ static int latency_runtime_event(struct perf_sched *sched,
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
int cpu = sample->cpu;
int cpu = sample->cpu, err = -1;
if (thread == NULL)
return -1;
BUG_ON(cpu >= MAX_CPUS || cpu < 0);
if (!atoms) {
if (thread_atoms_insert(sched, thread))
return -1;
goto out_put;
atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
if (!atoms) {
pr_err("in-event: Internal tree error");
return -1;
goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
return -1;
goto out_put;
}
add_runtime_event(atoms, runtime, timestamp);
return 0;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int latency_wakeup_event(struct perf_sched *sched,
@ -1050,19 +1064,22 @@ static int latency_wakeup_event(struct perf_sched *sched,
struct work_atom *atom;
struct thread *wakee;
u64 timestamp = sample->time;
int err = -1;
wakee = machine__findnew_thread(machine, -1, pid);
if (wakee == NULL)
return -1;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, wakee))
return -1;
goto out_put;
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
pr_err("wakeup-event: Internal tree error");
return -1;
goto out_put;
}
if (add_sched_out_event(atoms, 'S', timestamp))
return -1;
goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
@ -1081,17 +1098,21 @@ static int latency_wakeup_event(struct perf_sched *sched,
* skip in this case.
*/
if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
return 0;
goto out_ok;
sched->nr_timestamps++;
if (atom->sched_out_time > timestamp) {
sched->nr_unordered_timestamps++;
return 0;
goto out_ok;
}
atom->state = THREAD_WAIT_CPU;
atom->wake_up_time = timestamp;
return 0;
out_ok:
err = 0;
out_put:
thread__put(wakee);
return err;
}
static int latency_migrate_task_event(struct perf_sched *sched,
@ -1104,6 +1125,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *migrant;
int err = -1;
/*
* Only need to worry about migration when profiling one CPU.
@ -1112,18 +1134,20 @@ static int latency_migrate_task_event(struct perf_sched *sched,
return 0;
migrant = machine__findnew_thread(machine, -1, pid);
if (migrant == NULL)
return -1;
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, migrant))
return -1;
goto out_put;
register_pid(sched, migrant->tid, thread__comm_str(migrant));
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
pr_err("migration-event: Internal tree error");
return -1;
goto out_put;
}
if (add_sched_out_event(atoms, 'R', timestamp))
return -1;
goto out_put;
}
BUG_ON(list_empty(&atoms->work_list));
@ -1135,8 +1159,10 @@ static int latency_migrate_task_event(struct perf_sched *sched,
if (atom->sched_out_time > timestamp)
sched->nr_unordered_timestamps++;
return 0;
err = 0;
out_put:
thread__put(migrant);
return err;
}
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
@ -1330,8 +1356,10 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
}
sched_in = machine__findnew_thread(machine, -1, next_pid);
if (sched_in == NULL)
return -1;
sched->curr_thread[this_cpu] = sched_in;
sched->curr_thread[this_cpu] = thread__get(sched_in);
printf(" ");
@ -1381,6 +1409,8 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
printf("\n");
}
thread__put(sched_in);
return 0;
}

tools/perf/builtin-script.c:

@ -607,13 +607,14 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
}
if (al.filtered)
return 0;
goto out_put;
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
goto out_put;
scripting_ops->process_event(event, sample, evsel, &al);
out_put:
addr_location__put(&al);
return 0;
}
@ -681,8 +682,8 @@ static int process_comm_event(struct perf_tool *tool,
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
ret = 0;
out:
thread__put(thread);
return ret;
}
@ -713,6 +714,7 @@ static int process_fork_event(struct perf_tool *tool,
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}
@ -721,6 +723,7 @@ static int process_exit_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
int err = 0;
struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
@ -742,9 +745,10 @@ static int process_exit_event(struct perf_tool *tool,
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
return -1;
err = -1;
return 0;
thread__put(thread);
return err;
}
static int process_mmap_event(struct perf_tool *tool,
@ -774,7 +778,7 @@ static int process_mmap_event(struct perf_tool *tool,
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}
@ -805,7 +809,7 @@ static int process_mmap2_event(struct perf_tool *tool,
}
print_sample_start(sample, thread, evsel);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
}

tools/perf/builtin-timechart.c:

@ -523,7 +523,7 @@ static const char *cat_backtrace(union perf_event *event,
* Discard all.
*/
zfree(&p);
goto exit;
goto exit_put;
}
continue;
}
@ -538,7 +538,8 @@ static const char *cat_backtrace(union perf_event *event,
else
fprintf(f, "..... %016" PRIx64 "\n", ip);
}
exit_put:
addr_location__put(&al);
exit:
fclose(f);

tools/perf/builtin-top.c:

@ -793,7 +793,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
pthread_mutex_unlock(&hists->lock);
}
return;
addr_location__put(&al);
}
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)

tools/perf/builtin-trace.c:

@ -1712,7 +1712,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
void *args;
size_t printed = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@ -1725,14 +1725,14 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
ttrace->entry_str = malloc(1024);
if (!ttrace->entry_str)
return -1;
goto out_put;
}
if (!trace->summary_only)
@ -1757,8 +1757,10 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
thread__put(trace->current);
trace->current = thread__get(thread);
}
return 0;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
@ -1768,7 +1770,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
long ret;
u64 duration = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample);
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@ -1781,7 +1783,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
goto out_put;
if (trace->summary)
thread__update_stats(ttrace, id, sample);
@ -1835,8 +1837,10 @@ signed_print:
fputc('\n', trace->output);
out:
ttrace->entry_pending = false;
return 0;
err = 0;
out_put:
thread__put(thread);
return err;
}
static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
@ -1863,6 +1867,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
ttrace->runtime_ms += runtime_ms;
trace->runtime_ms += runtime_ms;
thread__put(thread);
return 0;
out_dump:
@ -1872,6 +1877,7 @@ out_dump:
(pid_t)perf_evsel__intval(evsel, sample, "pid"),
runtime,
perf_evsel__intval(evsel, sample, "vruntime"));
thread__put(thread);
return 0;
}
@ -1924,11 +1930,12 @@ static int trace__pgfault(struct trace *trace,
struct addr_location al;
char map_type = 'd';
struct thread_trace *ttrace;
int err = -1;
thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
return -1;
goto out_put;
if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
ttrace->pfmaj++;
@ -1936,7 +1943,7 @@ static int trace__pgfault(struct trace *trace,
ttrace->pfmin++;
if (trace->summary_only)
return 0;
goto out;
thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
sample->ip, &al);
@ -1967,8 +1974,11 @@ static int trace__pgfault(struct trace *trace,
print_location(trace->output, sample, &al, true, false);
fprintf(trace->output, " (%c%c)\n", map_type, al.level);
return 0;
out:
err = 0;
out_put:
thread__put(thread);
return err;
}
static bool skip_sample(struct trace *trace, struct perf_sample *sample)

tools/perf/config/Makefile:

@ -268,6 +268,10 @@ else
endif # libelf support
endif # NO_LIBELF
ifdef NO_DWARF
NO_LIBDW_DWARF_UNWIND := 1
endif
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf

tools/perf/perf-sys.h:

@ -6,11 +6,9 @@
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
#include <asm/barrier.h>
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
@ -25,9 +23,6 @@
#endif
#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
#ifndef __NR_perf_event_open
@ -43,129 +38,63 @@
#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define CPUINFO_PROC {"vendor_id"}
#endif
#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC {"cpu type"}
#endif
#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
"membar #StoreLoad\n" \
"1:\n":::"memory")
#else
#define mb() asm volatile("":::"memory")
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC {"cpu"}
#endif
#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC {"cpu model"}
#endif
#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC {"model name"}
#endif
#ifdef __arm__
/*
* Use the __kuser_memory_barrier helper in the CPU helper page. See
* arch/arm/kernel/entry-armv.S in the kernel source for details.
*/
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC {"model name", "Processor"}
#endif
#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif
#ifdef __mips__
#define mb() asm volatile( \
".set mips2\n\t" \
"sync\n\t" \
".set mips0" \
: /* no output */ \
: /* no input */ \
: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC {"cpu model"}
#endif
#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"Processor"}
#endif
#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"CPU"}
#endif
#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC {"core ID"}
#endif
#ifdef __tile__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC {"model name"}
#endif
#define barrier() asm volatile ("" ::: "memory")
#ifndef cpu_relax
#define cpu_relax() barrier()
#endif
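
These hunks collapse perf.h's long per-arch mb()/rmb()/wmb() ladder into the
single <asm/barrier.h> include added at the top. The reason the tools need
the barriers at all is the mmap ring buffer: it is written by the kernel and
read by the tools, so the consumer must order its payload reads against the
data_head/data_tail updates. A minimal sketch of that consumer-side pattern,
modelled on perf_mmap__read_head()/perf_mmap__write_tail() (field names
follow the UAPI struct perf_event_mmap_page; an illustration, not the exact
tree contents):

        #include <linux/perf_event.h>
        #include <asm/barrier.h>

        /* Read the producer position, ordering later payload reads after it. */
        static __u64 ring_read_head(struct perf_event_mmap_page *pc)
        {
                __u64 head = pc->data_head;

                rmb();          /* don't read sample payload before data_head */
                return head;
        }

        /* Publish the consumer position only after payload reads are done. */
        static void ring_write_tail(struct perf_event_mmap_page *pc, __u64 tail)
        {
                mb();           /* finish consuming data before exposing tail */
                pc->data_tail = tail;
        }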


@ -248,6 +248,7 @@ static int process_sample_event(struct machine *machine,
struct perf_sample sample;
struct thread *thread;
u8 cpumode;
int ret;
if (perf_evlist__parse_sample(evlist, event, &sample)) {
pr_debug("perf_evlist__parse_sample failed\n");
@ -262,7 +263,9 @@ static int process_sample_event(struct machine *machine,
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
return read_object_code(sample.ip, READLEN, cpumode, thread, state);
ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);
thread__put(thread);
return ret;
}
static int process_event(struct machine *machine, struct perf_evlist *evlist,
@ -457,13 +460,13 @@ static int do_test_code_reading(bool try_kcore)
thread = machine__findnew_thread(machine, pid, pid);
if (!thread) {
pr_debug("machine__findnew_thread failed\n");
goto out_err;
goto out_put;
}
cpus = cpu_map__new(NULL);
if (!cpus) {
pr_debug("cpu_map__new failed\n");
goto out_err;
goto out_put;
}
while (1) {
@ -472,7 +475,7 @@ static int do_test_code_reading(bool try_kcore)
evlist = perf_evlist__new();
if (!evlist) {
pr_debug("perf_evlist__new failed\n");
goto out_err;
goto out_put;
}
perf_evlist__set_maps(evlist, cpus, threads);
@ -485,7 +488,7 @@ static int do_test_code_reading(bool try_kcore)
ret = parse_events(evlist, str, NULL);
if (ret < 0) {
pr_debug("parse_events failed\n");
goto out_err;
goto out_put;
}
perf_evlist__config(evlist, &opts);
@ -506,7 +509,7 @@ static int do_test_code_reading(bool try_kcore)
continue;
}
pr_debug("perf_evlist__open failed\n");
goto out_err;
goto out_put;
}
break;
}
@ -514,7 +517,7 @@ static int do_test_code_reading(bool try_kcore)
ret = perf_evlist__mmap(evlist, UINT_MAX, false);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
goto out_err;
goto out_put;
}
perf_evlist__enable(evlist);
@ -525,7 +528,7 @@ static int do_test_code_reading(bool try_kcore)
ret = process_events(machine, evlist, &state);
if (ret < 0)
goto out_err;
goto out_put;
if (!have_vmlinux && !have_kcore && !try_kcore)
err = TEST_CODE_READING_NO_KERNEL_OBJ;
@ -535,7 +538,10 @@ static int do_test_code_reading(bool try_kcore)
err = TEST_CODE_READING_NO_ACCESS;
else
err = TEST_CODE_READING_OK;
out_put:
thread__put(thread);
out_err:
if (evlist) {
perf_evlist__delete(evlist);
} else {
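
Note how every failure after the thread lookup in these hunks now funnels
through the new out_put label, while failures before it keep using out_err:
once machine__findnew_thread() succeeds, each exit path owes exactly one
thread__put(). The two-label cleanup idiom, condensed (perf internal APIs
assumed; use_thread() is a hypothetical stand-in for the real work):

        static int do_test(struct machine *machine, pid_t pid)
        {
                struct thread *thread;
                int err = -1;

                thread = machine__findnew_thread(machine, pid, pid);
                if (thread == NULL)
                        goto out_err;           /* no reference taken yet */

                if (use_thread(thread) < 0)     /* hypothetical work */
                        goto out_put;

                err = 0;
        out_put:
                thread__put(thread);            /* pairs with findnew's get */
        out_err:
                return err;
        }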


@ -170,6 +170,7 @@ int test__dwarf_unwind(void)
}
err = krava_1(thread);
thread__put(thread);
out:
machine__delete_threads(machine);


@ -96,6 +96,7 @@ struct machine *setup_fake_machine(struct machines *machines)
goto out;
thread__set_comm(thread, fake_threads[i].comm, 0);
thread__put(thread);
}
for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {


@ -105,8 +105,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0)
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
}
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;


@ -82,8 +82,10 @@ static int add_hist_entries(struct perf_evlist *evlist,
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0)
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
}
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;


@ -91,8 +91,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
if (he == NULL)
if (he == NULL) {
addr_location__put(&al);
goto out;
}
fake_common_samples[k].thread = al.thread;
fake_common_samples[k].map = al.map;
@ -115,8 +117,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
he = __hists__add_entry(hists, &al, NULL,
NULL, NULL, 1, 1, 0, true);
if (he == NULL)
if (he == NULL) {
addr_location__put(&al);
goto out;
}
fake_samples[i][k].thread = al.thread;
fake_samples[i][k].map = al.map;


@ -71,8 +71,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
goto out;
if (hist_entry_iter__add(&iter, &al, evsel, &sample,
PERF_MAX_STACK_DEPTH, NULL) < 0)
PERF_MAX_STACK_DEPTH, NULL) < 0) {
addr_location__put(&al);
goto out;
}
fake_samples[i].thread = al.thread;
fake_samples[i].map = al.map;


@ -191,6 +191,8 @@ static int mmap_events(synth_cb synth)
PERF_RECORD_MISC_USER, MAP__FUNCTION,
(unsigned long) (td->map + 1), &al);
thread__put(thread);
if (!al.map) {
pr_debug("failed, couldn't find map\n");
err = -1;


@ -64,22 +64,22 @@ int test__thread_mg_share(void)
TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
/* release thread group */
thread__delete(leader);
thread__put(leader);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);
thread__delete(t1);
thread__put(t1);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);
thread__delete(t2);
thread__put(t2);
TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);
thread__delete(t3);
thread__put(t3);
/* release other group */
thread__delete(other_leader);
thread__put(other_leader);
TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);
thread__delete(other);
thread__put(other);
/*
* Cannot call machine__delete_threads(machine) now,


@ -43,6 +43,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
if (al.map != NULL)
al.map->dso->hit = 1;
thread__put(thread);
return 0;
}
@ -59,8 +60,10 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
if (thread)
if (thread) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
return 0;
}


@ -122,6 +122,7 @@ int db_export__machine(struct db_export *dbe, struct machine *machine)
int db_export__thread(struct db_export *dbe, struct thread *thread,
struct machine *machine, struct comm *comm)
{
struct thread *main_thread;
u64 main_thread_db_id = 0;
int err;
@ -131,8 +132,6 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
thread->db_id = ++dbe->thread_last_db_id;
if (thread->pid_ != -1) {
struct thread *main_thread;
if (thread->pid_ == thread->tid) {
main_thread = thread;
} else {
@ -144,14 +143,16 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
err = db_export__thread(dbe, main_thread, machine,
comm);
if (err)
return err;
goto out_put;
if (comm) {
err = db_export__comm_thread(dbe, comm, thread);
if (err)
return err;
goto out_put;
}
}
main_thread_db_id = main_thread->db_id;
if (main_thread != thread)
thread__put(main_thread);
}
if (dbe->export_thread)
@ -159,6 +160,10 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
machine);
return 0;
out_put:
thread__put(main_thread);
return err;
}
int db_export__comm(struct db_export *dbe, struct comm *comm,
@ -303,6 +308,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
if (err)
return err;
/* FIXME: check refcounting for get_main_thread, that calls machine__find_thread... */
main_thread = get_main_thread(al->machine, thread);
if (main_thread)
comm = machine__thread_exec_comm(al->machine, main_thread);


@ -139,10 +139,26 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
{
const char *name;
name = dwarf_diename(dw_die);
return name ? (strcmp(tname, name) == 0) : false;
}
/**
* die_match_name - Match diename and glob
* @dw_die: a DIE
* @glob: a string of target glob pattern
*
* Glob matching the name of @dw_die against @glob. Return false if matching fails.
*/
bool die_match_name(Dwarf_Die *dw_die, const char *glob)
{
const char *name;
name = dwarf_diename(dw_die);
return name ? strglobmatch(name, glob) : false;
}
/**
* die_get_call_lineno - Get callsite line number of inline-function instance
* @in_die: a DIE of an inlined function instance
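
The new die_match_name() above differs from die_compare_name() only in using
strglobmatch() instead of strcmp(), which is what lets probe points refer to
functions by wildcard. Its matching semantics, shown with made-up patterns
(strglobmatch() is declared in perf's util.h, extended in the last hunk of
this series):

        #include <assert.h>
        #include "util.h"       /* perf's strglobmatch() */

        int main(void)
        {
                assert(strglobmatch("vfs_read", "vfs_*"));      /* '*' wildcard */
                assert(strglobmatch("vfs_read", "vfs_r?ad"));   /* '?' one char */
                assert(strglobmatch("vfs_read", "vfs_[rw]*"));  /* char class  */
                assert(!strglobmatch("sys_read", "vfs_*"));
                return 0;
        }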


@ -47,6 +47,9 @@ extern bool die_is_func_instance(Dwarf_Die *dw_die);
/* Compare diename and tname */
extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
/* Matching diename with glob pattern */
extern bool die_match_name(Dwarf_Die *dw_die, const char *glob);
/* Get callsite line number of inline-function instance */
extern int die_get_call_lineno(Dwarf_Die *in_die);


@ -919,6 +919,10 @@ void thread__find_addr_location(struct thread *thread,
al->sym = NULL;
}
/*
* Callers need to drop the reference to al->thread, obtained in
* machine__findnew_thread()
*/
int perf_event__preprocess_sample(const union perf_event *event,
struct machine *machine,
struct addr_location *al,
@ -979,6 +983,17 @@ int perf_event__preprocess_sample(const union perf_event *event,
return 0;
}
/*
* The preprocess_sample method will return with reference counts for the
* entries in it; when done using them (and perhaps getting ref counts if
* needing to keep a pointer to one of those entries) it must be paired with
* addr_location__put(), so that the refcounts can be decremented.
*/
void addr_location__put(struct addr_location *al)
{
thread__zput(al->thread);
}
bool is_bts_event(struct perf_event_attr *attr)
{
return attr->type == PERF_TYPE_HARDWARE &&
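
The hists tests earlier in this series show the calling convention the new
comment establishes: every successful perf_event__preprocess_sample() must
eventually be matched by an addr_location__put(), which drops the al->thread
reference taken during the lookup. Schematically (perf internal types
assumed, error handling trimmed):

        struct addr_location al;

        if (perf_event__preprocess_sample(event, machine, &al, &sample) < 0)
                return -1;              /* lookup failed, nothing to put */

        /* ... use al.thread, al.map, al.sym ... */

        addr_location__put(&al);        /* zputs al.thread, see above */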


@ -426,6 +426,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
struct addr_location *al,
struct perf_sample *sample);
void addr_location__put(struct addr_location *al);
struct thread;
bool is_bts_event(struct perf_event_attr *attr);


@ -14,6 +14,8 @@
#include "unwind.h"
#include "linux/hash.h"
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
static void dsos__init(struct dsos *dsos)
{
INIT_LIST_HEAD(&dsos->head);
@ -28,6 +30,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
dsos__init(&machine->kernel_dsos);
machine->threads = RB_ROOT;
pthread_rwlock_init(&machine->threads_lock, NULL);
INIT_LIST_HEAD(&machine->dead_threads);
machine->last_match = NULL;
@ -54,6 +57,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
snprintf(comm, sizeof(comm), "[guest/%d]", pid);
thread__set_comm(thread, comm, 0);
thread__put(thread);
}
machine->current_tid = NULL;
@ -91,14 +95,17 @@ static void dsos__delete(struct dsos *dsos)
void machine__delete_threads(struct machine *machine)
{
struct rb_node *nd = rb_first(&machine->threads);
struct rb_node *nd;
pthread_rwlock_wrlock(&machine->threads_lock);
nd = rb_first(&machine->threads);
while (nd) {
struct thread *t = rb_entry(nd, struct thread, rb_node);
nd = rb_next(nd);
machine__remove_thread(machine, t);
__machine__remove_thread(machine, t, false);
}
pthread_rwlock_unlock(&machine->threads_lock);
}
void machine__exit(struct machine *machine)
@ -109,6 +116,7 @@ void machine__exit(struct machine *machine)
vdso__exit(machine);
zfree(&machine->root_dir);
zfree(&machine->current_tid);
pthread_rwlock_destroy(&machine->threads_lock);
}
void machine__delete(struct machine *machine)
@ -303,7 +311,7 @@ static void machine__update_thread_pid(struct machine *machine,
if (th->pid_ == th->tid)
return;
leader = machine__findnew_thread(machine, th->pid_, th->pid_);
leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
if (!leader)
goto out_err;
@ -336,9 +344,9 @@ out_err:
pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
static struct thread *__machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
static struct thread *____machine__findnew_thread(struct machine *machine,
pid_t pid, pid_t tid,
bool create)
{
struct rb_node **p = &machine->threads.rb_node;
struct rb_node *parent = NULL;
@ -393,6 +401,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
*/
if (thread__init_map_groups(th, machine)) {
rb_erase(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
thread__delete(th);
return NULL;
}
@ -406,16 +415,30 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
return th;
}
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
return ____machine__findnew_thread(machine, pid, tid, true);
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
return __machine__findnew_thread(machine, pid, tid, true);
struct thread *th;
pthread_rwlock_wrlock(&machine->threads_lock);
th = thread__get(__machine__findnew_thread(machine, pid, tid));
pthread_rwlock_unlock(&machine->threads_lock);
return th;
}
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
return __machine__findnew_thread(machine, pid, tid, false);
struct thread *th;
pthread_rwlock_rdlock(&machine->threads_lock);
th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
pthread_rwlock_unlock(&machine->threads_lock);
return th;
}
struct comm *machine__thread_exec_comm(struct machine *machine,
@ -434,6 +457,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
event->comm.pid,
event->comm.tid);
bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
int err = 0;
if (exec)
machine->comm_exec = true;
@ -444,10 +468,12 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
if (thread == NULL ||
__thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
return -1;
err = -1;
}
return 0;
thread__put(thread);
return err;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
@ -591,12 +617,16 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
size_t ret = 0;
struct rb_node *nd;
pthread_rwlock_rdlock(&machine->threads_lock);
for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
struct thread *pos = rb_entry(nd, struct thread, rb_node);
ret += thread__fprintf(pos, fp);
}
pthread_rwlock_unlock(&machine->threads_lock);
return ret;
}
@ -1213,11 +1243,14 @@ int machine__process_mmap2_event(struct machine *machine,
event->mmap2.filename, type, thread);
if (map == NULL)
goto out_problem;
goto out_problem_map;
thread__insert_map(thread, map);
thread__put(thread);
return 0;
out_problem_map:
thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
return 0;
@ -1260,31 +1293,45 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
type, thread);
if (map == NULL)
goto out_problem;
goto out_problem_map;
thread__insert_map(thread, map);
thread__put(thread);
return 0;
out_problem_map:
thread__put(thread);
out_problem:
dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
return 0;
}
void machine__remove_thread(struct machine *machine, struct thread *th)
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
if (machine->last_match == th)
thread__zput(machine->last_match);
BUG_ON(th->refcnt.counter == 0);
if (lock)
pthread_rwlock_wrlock(&machine->threads_lock);
rb_erase(&th->rb_node, &machine->threads);
RB_CLEAR_NODE(&th->rb_node);
/*
* Move it first to the dead_threads list, then drop the reference:
* if this is the last reference, the thread__delete() destructor will
* be called and we will remove it from the dead_threads list.
*/
list_add_tail(&th->node, &machine->dead_threads);
if (lock)
pthread_rwlock_unlock(&machine->threads_lock);
thread__put(th);
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
return __machine__remove_thread(machine, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
@ -1294,10 +1341,13 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
int err = 0;
/* if a thread currently exists for the thread id remove it */
if (thread != NULL)
if (thread != NULL) {
machine__remove_thread(machine, thread);
thread__put(thread);
}
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
@ -1307,10 +1357,12 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time) < 0) {
dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
return -1;
err = -1;
}
thread__put(thread);
thread__put(parent);
return 0;
return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
@ -1323,8 +1375,10 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
if (dump_trace)
perf_event__fprintf_task(event, stdout);
if (thread != NULL)
if (thread != NULL) {
thread__exited(thread);
thread__put(thread);
}
return 0;
}
@ -1841,6 +1895,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
return -ENOMEM;
thread->cpu = cpu;
thread__put(thread);
return 0;
}
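
Putting the machine.c changes together: the threads rb-tree is now only
walked while holding machine->threads_lock, and both lookup entry points
return a reference the caller owns, so a lookup racing with removal becomes
harmless. Every consumer ends up with the same shape (a sketch, perf internal
APIs assumed):

        struct thread *thread = machine__find_thread(machine, pid, tid);

        if (thread != NULL) {
                /*
                 * Even if another context calls machine__remove_thread()
                 * right now, the entry merely migrates to dead_threads and
                 * loses the tree's reference; ours keeps the object alive
                 * until the put below.
                 */
                /* ... use thread ... */
                thread__put(thread);
        }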


@ -30,6 +30,7 @@ struct machine {
bool comm_exec;
char *root_dir;
struct rb_root threads;
pthread_rwlock_t threads_lock;
struct list_head dead_threads;
struct thread *last_match;
struct vdso_info *vdso_info;
@ -151,8 +152,8 @@ static inline bool machine__is_host(struct machine *machine)
return machine ? machine->pid == HOST_KERNEL_ID : false;
}
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
pid_t tid);
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
size_t machine__fprintf(struct machine *machine, FILE *fp);


@ -51,6 +51,7 @@
#define PERFPROBE_GROUP "probe"
bool probe_event_dry_run; /* Dry run flag */
struct probe_conf probe_conf;
#define semantic_error(msg ...) pr_err("Semantic error :" msg)
@ -344,15 +345,14 @@ out:
static int get_alternative_probe_event(struct debuginfo *dinfo,
struct perf_probe_event *pev,
struct perf_probe_point *tmp,
const char *target)
struct perf_probe_point *tmp)
{
int ret;
memcpy(tmp, &pev->point, sizeof(*tmp));
memset(&pev->point, 0, sizeof(pev->point));
ret = find_alternative_probe_point(dinfo, tmp, &pev->point,
target, pev->uprobes);
pev->target, pev->uprobes);
if (ret < 0)
memcpy(&pev->point, tmp, sizeof(*tmp));
@ -557,8 +557,9 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
bool uprobe)
{
struct ref_reloc_sym *reloc_sym;
u64 etext_addr;
char *tmp;
int i;
int i, skipped = 0;
if (uprobe)
return add_exec_to_probe_trace_events(tevs, ntevs, module);
@ -572,33 +573,45 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
pr_warning("Relocated base symbol is not found!\n");
return -EINVAL;
}
/* Get the address of _etext for checking non-probeable text symbols */
etext_addr = kernel_get_symbol_address_by_name("_etext", false);
for (i = 0; i < ntevs; i++) {
if (tevs[i].point.address && !tevs[i].point.retprobe) {
tmp = strdup(reloc_sym->name);
if (!tmp)
return -ENOMEM;
free(tevs[i].point.symbol);
/* If we found a wrong one, mark it with a NULL symbol */
if (etext_addr < tevs[i].point.address) {
pr_warning("%s+%lu is out of .text, skip it.\n",
tevs[i].point.symbol, tevs[i].point.offset);
tmp = NULL;
skipped++;
} else {
tmp = strdup(reloc_sym->name);
if (!tmp)
return -ENOMEM;
}
/* If we have no realname, use symbol for it */
if (!tevs[i].point.realname)
tevs[i].point.realname = tevs[i].point.symbol;
else
free(tevs[i].point.symbol);
tevs[i].point.symbol = tmp;
tevs[i].point.offset = tevs[i].point.address -
reloc_sym->unrelocated_addr;
}
}
return 0;
return skipped;
}
/* Try to find perf_probe_event with debuginfo */
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs, const char *target)
struct probe_trace_event **tevs)
{
bool need_dwarf = perf_probe_event_need_dwarf(pev);
struct perf_probe_point tmp;
struct debuginfo *dinfo;
int ntevs, ret = 0;
dinfo = open_debuginfo(target, !need_dwarf);
dinfo = open_debuginfo(pev->target, !need_dwarf);
if (!dinfo) {
if (need_dwarf)
return -ENOENT;
@ -608,13 +621,12 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
pr_debug("Try to find probe point from debuginfo.\n");
/* Searching trace events corresponding to a probe event */
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs);
if (ntevs == 0) { /* Not found, retry with an alternative */
ret = get_alternative_probe_event(dinfo, pev, &tmp, target);
ret = get_alternative_probe_event(dinfo, pev, &tmp);
if (!ret) {
ntevs = debuginfo__find_trace_events(dinfo, pev,
tevs, max_tevs);
ntevs = debuginfo__find_trace_events(dinfo, pev, tevs);
/*
* Write back to the original probe_event for
* setting appropriate (user given) event name
@ -629,12 +641,15 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("Found %d probe_trace_events.\n", ntevs);
ret = post_process_probe_trace_events(*tevs, ntevs,
target, pev->uprobes);
if (ret < 0) {
pev->target, pev->uprobes);
if (ret < 0 || ret == ntevs) {
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
}
return ret < 0 ? ret : ntevs;
if (ret != ntevs)
return ret < 0 ? ret : ntevs;
ntevs = 0;
/* Fall through */
}
if (ntevs == 0) { /* No error but failed to find probe point. */
@ -809,8 +824,7 @@ int show_line_range(struct line_range *lr, const char *module, bool user)
static int show_available_vars_at(struct debuginfo *dinfo,
struct perf_probe_event *pev,
int max_vls, struct strfilter *_filter,
bool externs, const char *target)
struct strfilter *_filter)
{
char *buf;
int ret, i, nvars;
@ -824,13 +838,12 @@ static int show_available_vars_at(struct debuginfo *dinfo,
return -EINVAL;
pr_debug("Searching variables at %s\n", buf);
ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
max_vls, externs);
ret = debuginfo__find_available_vars_at(dinfo, pev, &vls);
if (!ret) { /* Not found, retry with an alternative */
ret = get_alternative_probe_event(dinfo, pev, &tmp, target);
ret = get_alternative_probe_event(dinfo, pev, &tmp);
if (!ret) {
ret = debuginfo__find_available_vars_at(dinfo, pev,
&vls, max_vls, externs);
&vls);
/* Release the old probe_point */
clear_perf_probe_point(&tmp);
}
@ -877,8 +890,7 @@ end:
/* Show available variables on given probe point */
int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_vls, const char *module,
struct strfilter *_filter, bool externs)
struct strfilter *_filter)
{
int i, ret = 0;
struct debuginfo *dinfo;
@ -887,7 +899,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
if (ret < 0)
return ret;
dinfo = open_debuginfo(module, false);
dinfo = open_debuginfo(pevs->target, false);
if (!dinfo) {
ret = -ENOENT;
goto out;
@ -896,8 +908,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
setup_pager();
for (i = 0; i < npevs && ret >= 0; i++)
ret = show_available_vars_at(dinfo, &pevs[i], max_vls, _filter,
externs, module);
ret = show_available_vars_at(dinfo, &pevs[i], _filter);
debuginfo__delete(dinfo);
out:
@ -916,9 +927,7 @@ find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
}
static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs __maybe_unused,
int max_tevs __maybe_unused,
const char *target __maybe_unused)
struct probe_trace_event **tevs __maybe_unused)
{
if (perf_probe_event_need_dwarf(pev)) {
pr_warning("Debuginfo-analysis is not supported.\n");
@ -937,10 +946,8 @@ int show_line_range(struct line_range *lr __maybe_unused,
}
int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
int npevs __maybe_unused, int max_vls __maybe_unused,
const char *module __maybe_unused,
struct strfilter *filter __maybe_unused,
bool externs __maybe_unused)
int npevs __maybe_unused,
struct strfilter *filter __maybe_unused)
{
pr_warning("Debuginfo-analysis is not supported.\n");
return -ENOSYS;
@ -980,6 +987,18 @@ static int parse_line_num(char **ptr, int *val, const char *what)
return 0;
}
/* Check the name is good for event, group or function */
static bool is_c_func_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
return false;
while (*++name != '\0') {
if (!isalpha(*name) && !isdigit(*name) && *name != '_')
return false;
}
return true;
}
/*
* Stuff 'lr' according to the line range described by 'arg'.
* The line range syntax is described by:
@ -1048,10 +1067,15 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
goto err;
}
lr->function = name;
} else if (strchr(name, '.'))
} else if (strchr(name, '/') || strchr(name, '.'))
lr->file = name;
else
else if (is_c_func_name(name))/* We reuse it for checking funcname */
lr->function = name;
else { /* Invalid name */
semantic_error("'%s' is not a valid function name.\n", name);
err = -EINVAL;
goto err;
}
return 0;
err:
@ -1059,18 +1083,6 @@ err:
return err;
}
/* Check the name is good for event/group */
static bool check_event_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
return false;
while (*++name != '\0') {
if (!isalpha(*name) && !isdigit(*name) && *name != '_')
return false;
}
return true;
}
/* Parse probepoint definition. */
static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
{
@ -1094,7 +1106,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
semantic_error("Group name is not supported yet.\n");
return -ENOTSUP;
}
if (!check_event_name(arg)) {
if (!is_c_func_name(arg)) {
semantic_error("%s is bad for event name -it must "
"follow C symbol-naming rule.\n", arg);
return -EINVAL;
@ -1892,6 +1904,7 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
free(tev->event);
free(tev->group);
free(tev->point.symbol);
free(tev->point.realname);
free(tev->point.module);
for (i = 0; i < tev->nargs; i++) {
free(tev->args[i].name);
@ -1969,7 +1982,7 @@ static int open_probe_events(const char *trace_file, bool readwrite)
if (ret >= 0) {
pr_debug("Opening %s write=%d\n", buf, readwrite);
if (readwrite && !probe_event_dry_run)
ret = open(buf, O_RDWR, O_APPEND);
ret = open(buf, O_RDWR | O_APPEND, 0);
else
ret = open(buf, O_RDONLY, 0);
@ -2369,6 +2382,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
struct strlist *namelist;
LIST_HEAD(blacklist);
struct kprobe_blacklist_node *node;
bool safename;
if (pev->uprobes)
fd = open_uprobe_events(true);
@ -2384,7 +2398,8 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
namelist = get_probe_trace_event_names(fd, false);
if (!namelist) {
pr_debug("Failed to get current event list.\n");
return -EIO;
ret = -ENOMEM;
goto close_out;
}
/* Get kprobe blacklist if exists */
if (!pev->uprobes) {
@ -2393,10 +2408,14 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
pr_debug("No kprobe blacklist support, ignored\n");
}
safename = (pev->point.function && !strisglob(pev->point.function));
ret = 0;
pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
for (i = 0; i < ntevs; i++) {
tev = &tevs[i];
/* Skip if the symbol is out of .text (marked previously) */
if (!tev->point.symbol)
continue;
/* Ensure that the address is NOT blacklisted */
node = kprobe_blacklist__find_by_address(&blacklist,
tev->point.address);
@ -2408,10 +2427,10 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
if (pev->event)
event = pev->event;
else
if (pev->point.function)
if (safename)
event = pev->point.function;
else
event = tev->point.symbol;
event = tev->point.realname;
if (pev->group)
group = pev->group;
else
@ -2467,6 +2486,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
kprobe_blacklist__delete(&blacklist);
strlist__delete(namelist);
close_out:
close(fd);
return ret;
}
@ -2475,9 +2495,11 @@ static int find_probe_functions(struct map *map, char *name)
{
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
map__for_each_symbol_by_name(map, name, sym) {
found++;
map__for_each_symbol(map, sym, tmp) {
if (strglobmatch(sym->name, name))
found++;
}
return found;
@ -2495,8 +2517,7 @@ void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
* Return an error or the number of found probe_trace_event
*/
static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs, const char *target)
struct probe_trace_event **tevs)
{
struct map *map = NULL;
struct ref_reloc_sym *reloc_sym = NULL;
@ -2507,7 +2528,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
int num_matched_functions;
int ret, i;
map = get_target_map(target, pev->uprobes);
map = get_target_map(pev->target, pev->uprobes);
if (!map) {
ret = -EINVAL;
goto out;
@ -2520,12 +2541,12 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
num_matched_functions = find_probe_functions(map, pp->function);
if (num_matched_functions == 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
target ? : "kernel");
pev->target ? : "kernel");
ret = -ENOENT;
goto out;
} else if (num_matched_functions > max_tevs) {
} else if (num_matched_functions > probe_conf.max_probes) {
pr_err("Too many functions matched in %s\n",
target ? : "kernel");
pev->target ? : "kernel");
ret = -E2BIG;
goto out;
}
@ -2573,8 +2594,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
tp->offset = pp->offset;
}
tp->retprobe = pp->retprobe;
if (target)
tev->point.module = strdup_or_goto(target, nomem_out);
if (pev->target)
tev->point.module = strdup_or_goto(pev->target,
nomem_out);
tev->uprobes = pev->uprobes;
tev->nargs = pev->nargs;
if (tev->nargs) {
@ -2614,14 +2636,13 @@ err_out:
bool __weak arch__prefers_symtab(void) { return false; }
static int convert_to_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs, const char *target)
struct probe_trace_event **tevs)
{
int ret;
if (pev->uprobes && !pev->group) {
/* Replace group name if not given */
ret = convert_exec_to_group(target, &pev->group);
ret = convert_exec_to_group(pev->target, &pev->group);
if (ret != 0) {
pr_warning("Failed to make a group name.\n");
return ret;
@ -2629,17 +2650,17 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
}
if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
ret = find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
ret = find_probe_trace_events_from_map(pev, tevs);
if (ret > 0)
return ret; /* Found in symbol table */
}
/* Convert perf_probe_event with debuginfo */
ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
ret = try_to_find_probe_trace_events(pev, tevs);
if (ret != 0)
return ret; /* Found in debuginfo or got an error */
return find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
return find_probe_trace_events_from_map(pev, tevs);
}
struct __event_package {
@ -2648,8 +2669,7 @@ struct __event_package {
int ntevs;
};
int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
int max_tevs, bool force_add)
int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
{
int i, j, ret;
struct __event_package *pkgs;
@ -2671,9 +2691,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
pkgs[i].pev = &pevs[i];
/* Convert with or without debuginfo */
ret = convert_to_probe_trace_events(pkgs[i].pev,
&pkgs[i].tevs,
max_tevs,
pkgs[i].pev->target);
&pkgs[i].tevs);
if (ret < 0)
goto end;
pkgs[i].ntevs = ret;
@ -2682,7 +2700,8 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
/* Loop 2: add all events */
for (i = 0; i < npevs; i++) {
ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
pkgs[i].ntevs, force_add);
pkgs[i].ntevs,
probe_conf.force_add);
if (ret < 0)
break;
}
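
One pattern in this file worth calling out: post_process_probe_trace_events()
no longer fails the whole request when a resolved address lands past _etext,
it marks the offender with a NULL symbol and returns the number skipped, and
__add_probe_trace_events() steps over marked entries; only "everything was
skipped" still counts as an error. A self-contained reduction of that
mark-and-skip idiom (the names are illustrative, not perf's):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        struct entry { char *name; unsigned long addr; };

        int main(void)
        {
                struct entry ev[] = {
                        { strdup("in_text"),    0x1000 },
                        { strdup("past_etext"), 0x9000 },
                };
                const unsigned long etext = 0x8000;
                const int ntevs = 2;
                int i, skipped = 0;

                /* pass 1: mark unusable entries instead of aborting the set */
                for (i = 0; i < ntevs; i++) {
                        if (etext < ev[i].addr) {
                                fprintf(stderr, "%s is out of .text, skip it.\n",
                                        ev[i].name);
                                free(ev[i].name);
                                ev[i].name = NULL;
                                skipped++;
                        }
                }

                /* pass 2: consumers simply ignore what was marked */
                for (i = 0; i < ntevs; i++)
                        if (ev[i].name)
                                printf("adding probe on %s\n", ev[i].name);

                for (i = 0; i < ntevs; i++)
                        free(ev[i].name);

                return skipped == ntevs;  /* all-skipped fails, as in the patch */
        }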


@ -6,10 +6,19 @@
#include "strlist.h"
#include "strfilter.h"
/* Probe related configurations */
struct probe_conf {
bool show_ext_vars;
bool force_add;
bool no_inlines;
int max_probes;
};
extern struct probe_conf probe_conf;
extern bool probe_event_dry_run;
/* kprobe-tracer and uprobe-tracer tracing point */
struct probe_trace_point {
char *realname; /* function real name (if needed) */
char *symbol; /* Base symbol */
char *module; /* Module name */
unsigned long offset; /* Offset from symbol */
@ -124,15 +133,13 @@ extern int line_range__init(struct line_range *lr);
/* Internal use: Return kernel/module path */
extern const char *kernel_get_module_path(const char *module);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
int max_probe_points, bool force_add);
extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
extern int del_perf_probe_events(struct strfilter *filter);
extern int show_perf_probe_events(struct strfilter *filter);
extern int show_line_range(struct line_range *lr, const char *module,
bool user);
extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
int max_probe_points, const char *module,
struct strfilter *filter, bool externs);
struct strfilter *filter);
extern int show_available_funcs(const char *module, struct strfilter *filter,
bool user);
bool arch__prefers_symtab(void);
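
The practical effect of struct probe_conf is visible in every signature diff
above: options that used to be threaded through the call chain as parameters
(max_tevs, force_add, externs, the target/module string) now travel in the
global probe_conf or in pev->target. A caller such as builtin-probe.c is
presumably left with something like this (assumed shape, derived from this
header):

        /* filled in from command-line options before any conversion runs */
        probe_conf.max_probes = MAX_PROBES;
        probe_conf.force_add  = true;
        probe_conf.no_inlines = false;

        /* pev->target carries what used to be the 'target' parameter */
        ret = add_perf_probe_events(pevs, npevs);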


@ -717,7 +717,7 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
}
/* If the function name is given, that's what the user expects */
if (fsp->function) {
if (die_compare_name(fn_die, fsp->function)) {
if (die_match_name(fn_die, fsp->function)) {
memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
fsp->found = true;
return 1;
@ -920,13 +920,14 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
/* Check tag and diename */
if (!die_is_func_def(sp_die) ||
!die_compare_name(sp_die, pp->function))
!die_match_name(sp_die, pp->function))
return DWARF_CB_OK;
/* Check declared file */
if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
return DWARF_CB_OK;
pr_debug("Matched function: %s\n", dwarf_diename(sp_die));
pf->fname = dwarf_decl_file(sp_die);
if (pp->line) { /* Function relative line */
dwarf_decl_line(sp_die, &pf->lno);
@ -943,10 +944,20 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
/* TODO: Check the address in this function */
param->retval = call_probe_finder(sp_die, pf);
}
} else
} else if (!probe_conf.no_inlines) {
/* Inlined function: search instances */
param->retval = die_walk_instances(sp_die,
probe_point_inline_cb, (void *)pf);
/* This could be a non-existent inline definition */
if (param->retval == -ENOENT && strisglob(pp->function))
param->retval = 0;
}
/* We need to find other candidates */
if (strisglob(pp->function) && param->retval >= 0) {
param->retval = 0; /* We have to clear the result */
return DWARF_CB_OK;
}
return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
}
@ -975,7 +986,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
return DWARF_CB_OK;
if (die_compare_name(param->sp_die, param->function)) {
if (die_match_name(param->sp_die, param->function)) {
if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
return DWARF_CB_OK;
@ -1028,7 +1039,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
return -ENOMEM;
/* Fastpath: lookup by function name from .debug_pubnames section */
if (pp->function) {
if (pp->function && !strisglob(pp->function)) {
struct pubname_callback_param pubname_param = {
.function = pp->function,
.file = pp->file,
@ -1087,6 +1098,7 @@ found:
struct local_vars_finder {
struct probe_finder *pf;
struct perf_probe_arg *args;
bool vars;
int max_args;
int nargs;
int ret;
@ -1101,7 +1113,7 @@ static int copy_variables_cb(Dwarf_Die *die_mem, void *data)
tag = dwarf_tag(die_mem);
if (tag == DW_TAG_formal_parameter ||
tag == DW_TAG_variable) {
(tag == DW_TAG_variable && vf->vars)) {
if (convert_variable_location(die_mem, vf->pf->addr,
vf->pf->fb_ops, &pf->sp_die,
NULL) == 0) {
@ -1127,26 +1139,28 @@ static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
Dwarf_Die die_mem;
int i;
int n = 0;
struct local_vars_finder vf = {.pf = pf, .args = args,
struct local_vars_finder vf = {.pf = pf, .args = args, .vars = false,
.max_args = MAX_PROBE_ARGS, .ret = 0};
for (i = 0; i < pf->pev->nargs; i++) {
/* var can never be NULL */
if (strcmp(pf->pev->args[i].var, "$vars") == 0) {
pr_debug("Expanding $vars into:");
vf.nargs = n;
/* Special local variables */
die_find_child(sc_die, copy_variables_cb, (void *)&vf,
&die_mem);
pr_debug(" (%d)\n", vf.nargs - n);
if (vf.ret < 0)
return vf.ret;
n = vf.nargs;
} else {
if (strcmp(pf->pev->args[i].var, PROBE_ARG_VARS) == 0)
vf.vars = true;
else if (strcmp(pf->pev->args[i].var, PROBE_ARG_PARAMS) != 0) {
/* Copy normal argument */
args[n] = pf->pev->args[i];
n++;
continue;
}
pr_debug("Expanding %s into:", pf->pev->args[i].var);
vf.nargs = n;
/* Special local variables */
die_find_child(sc_die, copy_variables_cb, (void *)&vf,
&die_mem);
pr_debug(" (%d)\n", vf.nargs - n);
if (vf.ret < 0)
return vf.ret;
n = vf.nargs;
}
return n;
}
@ -1174,6 +1188,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
if (ret < 0)
return ret;
tev->point.realname = strdup(dwarf_diename(sc_die));
if (!tev->point.realname)
return -ENOMEM;
pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
tev->point.offset);
@ -1211,15 +1229,15 @@ end:
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct probe_trace_event **tevs, int max_tevs)
struct probe_trace_event **tevs)
{
struct trace_event_finder tf = {
.pf = {.pev = pev, .callback = add_probe_trace_event},
.mod = dbg->mod, .max_tevs = max_tevs};
.max_tevs = probe_conf.max_probes, .mod = dbg->mod};
int ret;
/* Allocate result tevs array */
*tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
*tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
if (*tevs == NULL)
return -ENOMEM;
@ -1300,9 +1318,9 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
/* Find external variables */
if (!af->externs)
if (!probe_conf.show_ext_vars)
goto out;
/* Don't need to search child DIE for externs. */
/* Don't need to search child DIE for external vars. */
af->child = false;
die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
@ -1322,17 +1340,16 @@ out:
*/
int debuginfo__find_available_vars_at(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct variable_list **vls,
int max_vls, bool externs)
struct variable_list **vls)
{
struct available_var_finder af = {
.pf = {.pev = pev, .callback = add_available_vars},
.mod = dbg->mod,
.max_vls = max_vls, .externs = externs};
.max_vls = probe_conf.max_probes};
int ret;
/* Allocate result vls array */
*vls = zalloc(sizeof(struct variable_list) * max_vls);
*vls = zalloc(sizeof(struct variable_list) * af.max_vls);
if (*vls == NULL)
return -ENOMEM;
@ -1533,7 +1550,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
return DWARF_CB_OK;
if (die_is_func_def(sp_die) &&
die_compare_name(sp_die, lr->function)) {
die_match_name(sp_die, lr->function)) {
lf->fname = dwarf_decl_file(sp_die);
dwarf_decl_line(sp_die, &lr->offset);
pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);


@ -10,6 +10,9 @@
#define MAX_PROBES 128
#define MAX_PROBE_ARGS 128
#define PROBE_ARG_VARS "$vars"
#define PROBE_ARG_PARAMS "$params"
static inline int is_c_varname(const char *name)
{
/* TODO */
@ -37,8 +40,7 @@ extern void debuginfo__delete(struct debuginfo *dbg);
/* Find probe_trace_events specified by perf_probe_event from debuginfo */
extern int debuginfo__find_trace_events(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct probe_trace_event **tevs,
int max_tevs);
struct probe_trace_event **tevs);
/* Find a perf_probe_point from debuginfo */
extern int debuginfo__find_probe_point(struct debuginfo *dbg,
@ -52,8 +54,7 @@ extern int debuginfo__find_line_range(struct debuginfo *dbg,
/* Find available variables */
extern int debuginfo__find_available_vars_at(struct debuginfo *dbg,
struct perf_probe_event *pev,
struct variable_list **vls,
int max_points, bool externs);
struct variable_list **vls);
/* Find a src file from a DWARF tag path */
int get_real_path(const char *raw_path, const char *comp_dir,
@ -96,7 +97,6 @@ struct available_var_finder {
struct variable_list *vls; /* Found variable lists */
int nvls; /* Number of variable lists */
int max_vls; /* Max no. of variable lists */
bool externs; /* Find external vars too */
bool child; /* Search child scopes */
};


@ -18,7 +18,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
if (pid == thread->tid || pid == -1) {
thread->mg = map_groups__new(machine);
} else {
leader = machine__findnew_thread(machine, pid, pid);
leader = __machine__findnew_thread(machine, pid, pid);
if (leader)
thread->mg = map_groups__get(leader->mg);
}
@ -53,7 +53,9 @@ struct thread *thread__new(pid_t pid, pid_t tid)
goto err_thread;
list_add(&comm->list, &thread->comm_list);
atomic_set(&thread->refcnt, 0);
INIT_LIST_HEAD(&thread->node);
RB_CLEAR_NODE(&thread->rb_node);
}
return thread;
@ -67,6 +69,9 @@ void thread__delete(struct thread *thread)
{
struct comm *comm, *tmp;
BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
BUG_ON(!list_empty(&thread->node));
thread_stack__free(thread);
if (thread->mg) {
@ -84,13 +89,14 @@ void thread__delete(struct thread *thread)
struct thread *thread__get(struct thread *thread)
{
++thread->refcnt;
if (thread)
atomic_inc(&thread->refcnt);
return thread;
}
void thread__put(struct thread *thread)
{
if (thread && --thread->refcnt == 0) {
if (thread && atomic_dec_and_test(&thread->refcnt)) {
list_del_init(&thread->node);
thread__delete(thread);
}


@ -1,6 +1,7 @@
#ifndef __PERF_THREAD_H
#define __PERF_THREAD_H
#include <linux/atomic.h>
#include <linux/rbtree.h>
#include <linux/list.h>
#include <unistd.h>
@ -21,7 +22,7 @@ struct thread {
pid_t tid;
pid_t ppid;
int cpu;
int refcnt;
atomic_t refcnt;
char shortname[3];
bool comm_set;
bool dead; /* if set thread has exited */
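
With refcnt now an atomic_t (note the new <linux/atomic.h> include above),
the thread lifetime rule reduces to the classic kernel refcount idiom seen in
the thread.c hunk: get is atomic_inc(), put is atomic_dec_and_test() followed
by destruction. A self-contained miniature, using C11 atomics in place of the
kernel's atomic_t:

        #include <stdatomic.h>
        #include <stdlib.h>

        struct obj { atomic_int refcnt; /* + payload */ };

        static struct obj *obj__get(struct obj *o)
        {
                if (o)
                        atomic_fetch_add(&o->refcnt, 1);
                return o;
        }

        static void obj__put(struct obj *o)
        {
                /* the 1 -> 0 transition frees, like atomic_dec_and_test() */
                if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
                        free(o);
        }

        int main(void)
        {
                struct obj *o = calloc(1, sizeof(*o));

                atomic_init(&o->refcnt, 1);     /* creation reference */
                obj__get(o);                    /* a second user */
                obj__put(o);                    /* 2 -> 1, still alive */
                obj__put(o);                    /* 1 -> 0, freed here */
                return 0;
        }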


@ -257,6 +257,10 @@ char **argv_split(const char *str, int *argcp);
void argv_free(char **argv);
bool strglobmatch(const char *str, const char *pat);
bool strlazymatch(const char *str, const char *pat);
static inline bool strisglob(const char *str)
{
return strpbrk(str, "*?[") != NULL;
}
int strtailcmp(const char *s1, const char *s2);
char *strxfrchar(char *s, char from, char to);
unsigned long convert_unit(unsigned long value, char *unit);