parent
278a838231
commit
1669e4a5d1
|
@ -7,8 +7,6 @@
|
|||
*.ipynb filter=nbstripout -diff
|
||||
external/ffmpeg/bin/ffmpeg_cuda filter=lfs diff=lfs merge=lfs -text
|
||||
models/segnet.keras filter=lfs diff=lfs merge=lfs -text
|
||||
external/zmq/lib/libzmq.a filter=lfs diff=lfs merge=lfs -text
|
||||
external/zmq/lib/libczmq.a filter=lfs diff=lfs merge=lfs -text
|
||||
phonelibs/acado/x64/lib/libacado_toolkit.a filter=lfs diff=lfs merge=lfs -text
|
||||
phonelibs/acado/x64/lib/libacado_toolkit_s.so.1.2.2beta filter=lfs diff=lfs merge=lfs -text
|
||||
phonelibs/acado/x64/lib/libacado_casadi.a filter=lfs diff=lfs merge=lfs -text
|
||||
|
@ -21,8 +19,5 @@ phonelibs/acado/aarch64/lib/libacado_csparse.a filter=lfs diff=lfs merge=lfs -te
|
|||
phonelibs/acado/aarch64/lib/libacado_qpoases.a filter=lfs diff=lfs merge=lfs -text
|
||||
phonelibs/fastcv/aarch64/libfastcvopt.so filter=lfs diff=lfs merge=lfs -text
|
||||
phonelibs/fastcv/aarch64/libfastcvadsp_stub.so filter=lfs diff=lfs merge=lfs -text
|
||||
external/mac/MP4Box filter=lfs diff=lfs merge=lfs -text
|
||||
models/segnet2.keras filter=lfs diff=lfs merge=lfs -text
|
||||
external/opencl/*.deb filter=lfs diff=lfs merge=lfs -text
|
||||
phonelibs/zmq/aarch64-linux/lib/libzmq.a filter=lfs diff=lfs merge=lfs -text
|
||||
external/azcopy/azcopy filter=lfs diff=lfs merge=lfs -text
|
||||
|
|
|
@ -12,13 +12,13 @@ repos:
|
|||
rev: master
|
||||
hooks:
|
||||
- id: mypy
|
||||
exclude: '^(pyextra)|(external)|(cereal)|(rednose)|(panda)|(laika)|(opendbc)|(laika_repo)|(rednose_repo)/'
|
||||
exclude: '^(pyextra)|(cereal)|(rednose)|(panda)|(laika)|(opendbc)|(laika_repo)|(rednose_repo)/'
|
||||
additional_dependencies: ['git+https://github.com/numpy/numpy-stubs']
|
||||
- repo: https://github.com/PyCQA/flake8
|
||||
rev: master
|
||||
hooks:
|
||||
- id: flake8
|
||||
exclude: '^(pyextra)|(external)|(cereal)|(rednose)|(panda)|(laika)|(opendbc)|(laika_repo)|(rednose_repo)|(selfdrive/debug)/'
|
||||
exclude: '^(pyextra)|(cereal)|(rednose)|(panda)|(laika)|(opendbc)|(laika_repo)|(rednose_repo)|(selfdrive/debug)/'
|
||||
args:
|
||||
- --select=F,E112,E113,E304,E501,E502,E701,E702,E703,E71,E72,E731,W191,W6
|
||||
- --max-line-length=240
|
||||
|
@ -30,7 +30,7 @@ repos:
|
|||
entry: pylint
|
||||
language: system
|
||||
types: [python]
|
||||
exclude: '^(pyextra)|(external)|(cereal)|(rednose)|(panda)|(laika)|(laika_repo)|(rednose_repo)/'
|
||||
exclude: '^(pyextra)|(cereal)|(rednose)|(panda)|(laika)|(laika_repo)|(rednose_repo)/'
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: cppcheck
|
||||
|
@ -38,7 +38,7 @@ repos:
|
|||
entry: cppcheck
|
||||
language: system
|
||||
types: [c++]
|
||||
exclude: '^(phonelibs)|(external)|(cereal)|(opendbc)|(panda)|(tools)|(selfdrive/modeld/thneed/debug)|(selfdrive/modeld/test)|(selfdrive/camerad/test)/|(installer)'
|
||||
exclude: '^(phonelibs)|(cereal)|(opendbc)|(panda)|(tools)|(selfdrive/modeld/thneed/debug)|(selfdrive/modeld/test)|(selfdrive/camerad/test)/|(installer)'
|
||||
args:
|
||||
- --error-exitcode=1
|
||||
- --language=c++
|
||||
|
|
21
SConstruct
21
SConstruct
|
@ -31,11 +31,12 @@ if arch == "aarch64" and TICI:
|
|||
USE_WEBCAM = os.getenv("USE_WEBCAM") is not None
|
||||
QCOM_REPLAY = arch == "aarch64" and os.getenv("QCOM_REPLAY") is not None
|
||||
|
||||
lenv = {
|
||||
"PATH": os.environ['PATH'],
|
||||
}
|
||||
|
||||
if arch == "aarch64" or arch == "larch64":
|
||||
lenv = {
|
||||
"LD_LIBRARY_PATH": '/data/data/com.termux/files/usr/lib',
|
||||
"PATH": os.environ['PATH'],
|
||||
}
|
||||
lenv["LD_LIBRARY_PATH"] = '/data/data/com.termux/files/usr/lib'
|
||||
|
||||
if arch == "aarch64":
|
||||
# android
|
||||
|
@ -78,13 +79,7 @@ if arch == "aarch64" or arch == "larch64":
|
|||
else:
|
||||
cflags = []
|
||||
cxxflags = []
|
||||
|
||||
lenv = {
|
||||
"PATH": "#external/bin:" + os.environ['PATH'],
|
||||
}
|
||||
cpppath = [
|
||||
"#external/tensorflow/include",
|
||||
]
|
||||
cpppath = []
|
||||
|
||||
if arch == "Darwin":
|
||||
libpath = [
|
||||
|
@ -102,7 +97,6 @@ else:
|
|||
libpath = [
|
||||
"#phonelibs/snpe/x86_64-linux-clang",
|
||||
"#phonelibs/libyuv/x64/lib",
|
||||
"#external/tensorflow/lib",
|
||||
"#cereal",
|
||||
"#selfdrive/common",
|
||||
"/usr/lib",
|
||||
|
@ -111,7 +105,6 @@ else:
|
|||
|
||||
rpath = [
|
||||
"phonelibs/snpe/x86_64-linux-clang",
|
||||
"external/tensorflow/lib",
|
||||
"cereal",
|
||||
"selfdrive/common"
|
||||
]
|
||||
|
@ -168,8 +161,8 @@ env = Environment(
|
|||
"#selfdrive/modeld",
|
||||
"#selfdrive/sensord",
|
||||
"#selfdrive/ui",
|
||||
"#cereal/messaging",
|
||||
"#cereal",
|
||||
"#cereal/messaging",
|
||||
"#opendbc/can",
|
||||
],
|
||||
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
../pyflame/flame.sh
|
Binary file not shown.
Binary file not shown.
|
@ -1,136 +0,0 @@
|
|||
#
|
||||
# Copyright (C) 2016 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
class CallSite:
|
||||
|
||||
def __init__(self, method, dso):
|
||||
self.method = method
|
||||
self.dso = dso
|
||||
|
||||
|
||||
class Thread:
|
||||
|
||||
def __init__(self, tid, pid):
|
||||
self.tid = tid
|
||||
self.pid = pid
|
||||
self.name = ""
|
||||
self.samples = []
|
||||
self.flamegraph = FlameGraphCallSite("root", "", 0)
|
||||
self.num_samples = 0
|
||||
self.num_events = 0
|
||||
|
||||
def add_callchain(self, callchain, symbol, sample):
|
||||
self.name = sample.thread_comm
|
||||
self.num_samples += 1
|
||||
self.num_events += sample.period
|
||||
chain = []
|
||||
for j in range(callchain.nr):
|
||||
entry = callchain.entries[callchain.nr - j - 1]
|
||||
if entry.ip == 0:
|
||||
continue
|
||||
chain.append(CallSite(entry.symbol.symbol_name, entry.symbol.dso_name))
|
||||
|
||||
chain.append(CallSite(symbol.symbol_name, symbol.dso_name))
|
||||
self.flamegraph.add_callchain(chain, sample.period)
|
||||
|
||||
|
||||
class Process:
|
||||
|
||||
def __init__(self, name, pid):
|
||||
self.name = name
|
||||
self.pid = pid
|
||||
self.threads = {}
|
||||
self.cmd = ""
|
||||
self.props = {}
|
||||
# num_samples is the count of samples recorded in the profiling file.
|
||||
self.num_samples = 0
|
||||
# num_events is the count of events contained in all samples. Each sample contains a
|
||||
# count of events happened since last sample. If we use cpu-cycles event, the count
|
||||
# shows how many cpu-cycles have happened during recording.
|
||||
self.num_events = 0
|
||||
|
||||
def get_thread(self, tid, pid):
|
||||
thread = self.threads.get(tid)
|
||||
if thread is None:
|
||||
thread = self.threads[tid] = Thread(tid, pid)
|
||||
return thread
|
||||
|
||||
def add_sample(self, sample, symbol, callchain):
|
||||
thread = self.get_thread(sample.tid, sample.pid)
|
||||
thread.add_callchain(callchain, symbol, sample)
|
||||
self.num_samples += 1
|
||||
# sample.period is the count of events happened since last sample.
|
||||
self.num_events += sample.period
|
||||
|
||||
|
||||
class FlameGraphCallSite:
|
||||
|
||||
callsite_counter = 0
|
||||
@classmethod
|
||||
def _get_next_callsite_id(cls):
|
||||
cls.callsite_counter += 1
|
||||
return cls.callsite_counter
|
||||
|
||||
def __init__(self, method, dso, id):
|
||||
# map from (dso, method) to FlameGraphCallSite. Used to speed up add_callchain().
|
||||
self.child_dict = {}
|
||||
self.children = []
|
||||
self.method = method
|
||||
self.dso = dso
|
||||
self.num_events = 0
|
||||
self.offset = 0 # Offset allows position nodes in different branches.
|
||||
self.id = id
|
||||
|
||||
def weight(self):
|
||||
return float(self.num_events)
|
||||
|
||||
def add_callchain(self, chain, num_events):
|
||||
self.num_events += num_events
|
||||
current = self
|
||||
for callsite in chain:
|
||||
current = current._get_child(callsite)
|
||||
current.num_events += num_events
|
||||
|
||||
def _get_child(self, callsite):
|
||||
key = (callsite.dso, callsite.method)
|
||||
child = self.child_dict.get(key)
|
||||
if child is None:
|
||||
child = self.child_dict[key] = FlameGraphCallSite(callsite.method, callsite.dso,
|
||||
self._get_next_callsite_id())
|
||||
return child
|
||||
|
||||
def trim_callchain(self, min_num_events):
|
||||
""" Remove call sites with num_events < min_num_events in the subtree.
|
||||
Remaining children are collected in a list.
|
||||
"""
|
||||
for key in self.child_dict:
|
||||
child = self.child_dict[key]
|
||||
if child.num_events >= min_num_events:
|
||||
child.trim_callchain(min_num_events)
|
||||
self.children.append(child)
|
||||
# Relese child_dict since it will not be used.
|
||||
self.child_dict = None
|
||||
|
||||
def get_max_depth(self):
|
||||
return max([c.get_max_depth() for c in self.children]) + 1 if self.children else 1
|
||||
|
||||
def generate_offset(self, start_offset):
|
||||
self.offset = start_offset
|
||||
child_offset = start_offset
|
||||
for child in self.children:
|
||||
child_offset = child.generate_offset(child_offset)
|
||||
return self.offset + self.num_events
|
File diff suppressed because one or more lines are too long
|
@ -1,333 +0,0 @@
|
|||
#
|
||||
# Copyright (C) 2016 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
"""
|
||||
Inferno is a tool to generate flamegraphs for android programs. It was originally written
|
||||
to profile surfaceflinger (Android compositor) but it can be used for other C++ program.
|
||||
It uses simpleperf to collect data. Programs have to be compiled with frame pointers which
|
||||
excludes ART based programs for the time being.
|
||||
|
||||
Here is how it works:
|
||||
|
||||
1/ Data collection is started via simpleperf and pulled locally as "perf.data".
|
||||
2/ The raw format is parsed, callstacks are merged to form a flamegraph data structure.
|
||||
3/ The data structure is used to generate a SVG embedded into an HTML page.
|
||||
4/ Javascript is injected to allow flamegraph navigation, search, coloring model.
|
||||
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
scripts_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
||||
sys.path.append(scripts_path)
|
||||
from simpleperf_report_lib import ReportLib
|
||||
from utils import log_exit, log_info, AdbHelper, open_report_in_browser
|
||||
|
||||
from data_types import *
|
||||
from svg_renderer import *
|
||||
|
||||
|
||||
def collect_data(args):
|
||||
""" Run app_profiler.py to generate record file. """
|
||||
app_profiler_args = [sys.executable, os.path.join(scripts_path, "app_profiler.py"), "-nb"]
|
||||
if args.app:
|
||||
app_profiler_args += ["-p", args.app]
|
||||
elif args.native_program:
|
||||
app_profiler_args += ["-np", args.native_program]
|
||||
else:
|
||||
log_exit("Please set profiling target with -p or -np option.")
|
||||
if args.compile_java_code:
|
||||
app_profiler_args.append("--compile_java_code")
|
||||
if args.disable_adb_root:
|
||||
app_profiler_args.append("--disable_adb_root")
|
||||
record_arg_str = ""
|
||||
if args.dwarf_unwinding:
|
||||
record_arg_str += "-g "
|
||||
else:
|
||||
record_arg_str += "--call-graph fp "
|
||||
if args.events:
|
||||
tokens = args.events.split()
|
||||
if len(tokens) == 2:
|
||||
num_events = tokens[0]
|
||||
event_name = tokens[1]
|
||||
record_arg_str += "-c %s -e %s " % (num_events, event_name)
|
||||
else:
|
||||
log_exit("Event format string of -e option cann't be recognized.")
|
||||
log_info("Using event sampling (-c %s -e %s)." % (num_events, event_name))
|
||||
else:
|
||||
record_arg_str += "-f %d " % args.sample_frequency
|
||||
log_info("Using frequency sampling (-f %d)." % args.sample_frequency)
|
||||
record_arg_str += "--duration %d " % args.capture_duration
|
||||
app_profiler_args += ["-r", record_arg_str]
|
||||
returncode = subprocess.call(app_profiler_args)
|
||||
return returncode == 0
|
||||
|
||||
|
||||
def parse_samples(process, args, sample_filter_fn):
|
||||
"""Read samples from record file.
|
||||
process: Process object
|
||||
args: arguments
|
||||
sample_filter_fn: if not None, is used to modify and filter samples.
|
||||
It returns false for samples should be filtered out.
|
||||
"""
|
||||
|
||||
record_file = args.record_file
|
||||
symfs_dir = args.symfs
|
||||
kallsyms_file = args.kallsyms
|
||||
|
||||
lib = ReportLib()
|
||||
|
||||
lib.ShowIpForUnknownSymbol()
|
||||
if symfs_dir:
|
||||
lib.SetSymfs(symfs_dir)
|
||||
if record_file:
|
||||
lib.SetRecordFile(record_file)
|
||||
if kallsyms_file:
|
||||
lib.SetKallsymsFile(kallsyms_file)
|
||||
if args.show_art_frames:
|
||||
lib.ShowArtFrames(True)
|
||||
process.cmd = lib.GetRecordCmd()
|
||||
product_props = lib.MetaInfo().get("product_props")
|
||||
if product_props:
|
||||
tuple = product_props.split(':')
|
||||
process.props['ro.product.manufacturer'] = tuple[0]
|
||||
process.props['ro.product.model'] = tuple[1]
|
||||
process.props['ro.product.name'] = tuple[2]
|
||||
if lib.MetaInfo().get('trace_offcpu') == 'true':
|
||||
process.props['trace_offcpu'] = True
|
||||
if args.one_flamegraph:
|
||||
log_exit("It doesn't make sense to report with --one-flamegraph for perf.data " +
|
||||
"recorded with --trace-offcpu.""")
|
||||
else:
|
||||
process.props['trace_offcpu'] = False
|
||||
|
||||
while True:
|
||||
sample = lib.GetNextSample()
|
||||
if sample is None:
|
||||
lib.Close()
|
||||
break
|
||||
symbol = lib.GetSymbolOfCurrentSample()
|
||||
callchain = lib.GetCallChainOfCurrentSample()
|
||||
if sample_filter_fn and not sample_filter_fn(sample, symbol, callchain):
|
||||
continue
|
||||
process.add_sample(sample, symbol, callchain)
|
||||
|
||||
if process.pid == 0:
|
||||
main_threads = [thread for thread in process.threads.values() if thread.tid == thread.pid]
|
||||
if main_threads:
|
||||
process.name = main_threads[0].name
|
||||
process.pid = main_threads[0].pid
|
||||
|
||||
for thread in process.threads.values():
|
||||
min_event_count = thread.num_events * args.min_callchain_percentage * 0.01
|
||||
thread.flamegraph.trim_callchain(min_event_count)
|
||||
|
||||
log_info("Parsed %s callchains." % process.num_samples)
|
||||
|
||||
|
||||
def get_local_asset_content(local_path):
|
||||
"""
|
||||
Retrieves local package text content
|
||||
:param local_path: str, filename of local asset
|
||||
:return: str, the content of local_path
|
||||
"""
|
||||
with open(os.path.join(os.path.dirname(__file__), local_path), 'r') as f:
|
||||
return f.read()
|
||||
|
||||
|
||||
def output_report(process, args):
|
||||
"""
|
||||
Generates a HTML report representing the result of simpleperf sampling as flamegraph
|
||||
:param process: Process object
|
||||
:return: str, absolute path to the file
|
||||
"""
|
||||
f = open(args.report_path, 'w')
|
||||
filepath = os.path.realpath(f.name)
|
||||
if not args.embedded_flamegraph:
|
||||
f.write("<html><body>")
|
||||
f.write("<div id='flamegraph_id' style='font-family: Monospace; %s'>" % (
|
||||
"display: none;" if args.embedded_flamegraph else ""))
|
||||
f.write("""<style type="text/css"> .s { stroke:black; stroke-width:0.5; cursor:pointer;}
|
||||
</style>""")
|
||||
f.write('<style type="text/css"> .t:hover { cursor:pointer; } </style>')
|
||||
f.write('<img height="180" alt = "Embedded Image" src ="data')
|
||||
f.write(get_local_asset_content("inferno.b64"))
|
||||
f.write('"/>')
|
||||
process_entry = ("Process : %s (%d)<br/>" % (process.name, process.pid)) if process.pid else ""
|
||||
if process.props['trace_offcpu']:
|
||||
event_entry = 'Total time: %s<br/>' % get_proper_scaled_time_string(process.num_events)
|
||||
else:
|
||||
event_entry = 'Event count: %s<br/>' % ("{:,}".format(process.num_events))
|
||||
# TODO: collect capture duration info from perf.data.
|
||||
duration_entry = ("Duration: %s seconds<br/>" % args.capture_duration
|
||||
) if args.capture_duration else ""
|
||||
f.write("""<div style='display:inline-block;'>
|
||||
<font size='8'>
|
||||
Inferno Flamegraph Report%s</font><br/><br/>
|
||||
%s
|
||||
Date : %s<br/>
|
||||
Threads : %d <br/>
|
||||
Samples : %d<br/>
|
||||
%s
|
||||
%s""" % (
|
||||
(': ' + args.title) if args.title else '',
|
||||
process_entry,
|
||||
datetime.datetime.now().strftime("%Y-%m-%d (%A) %H:%M:%S"),
|
||||
len(process.threads),
|
||||
process.num_samples,
|
||||
event_entry,
|
||||
duration_entry))
|
||||
if 'ro.product.model' in process.props:
|
||||
f.write(
|
||||
"Machine : %s (%s) by %s<br/>" %
|
||||
(process.props["ro.product.model"],
|
||||
process.props["ro.product.name"],
|
||||
process.props["ro.product.manufacturer"]))
|
||||
if process.cmd:
|
||||
f.write("Capture : %s<br/><br/>" % process.cmd)
|
||||
f.write("</div>")
|
||||
f.write("""<br/><br/>
|
||||
<div>Navigate with WASD, zoom in with SPACE, zoom out with BACKSPACE.</div>""")
|
||||
f.write("<script>%s</script>" % get_local_asset_content("script.js"))
|
||||
if not args.embedded_flamegraph:
|
||||
f.write("<script>document.addEventListener('DOMContentLoaded', flamegraphInit);</script>")
|
||||
|
||||
# Sort threads by the event count in a thread.
|
||||
for thread in sorted(process.threads.values(), key=lambda x: x.num_events, reverse=True):
|
||||
f.write("<br/><br/><b>Thread %d (%s) (%d samples):</b><br/>\n\n\n\n" % (
|
||||
thread.tid, thread.name, thread.num_samples))
|
||||
renderSVG(process, thread.flamegraph, f, args.color)
|
||||
|
||||
f.write("</div>")
|
||||
if not args.embedded_flamegraph:
|
||||
f.write("</body></html")
|
||||
f.close()
|
||||
return "file://" + filepath
|
||||
|
||||
|
||||
def generate_threads_offsets(process):
|
||||
for thread in process.threads.values():
|
||||
thread.flamegraph.generate_offset(0)
|
||||
|
||||
|
||||
def collect_machine_info(process):
|
||||
adb = AdbHelper()
|
||||
process.props = {}
|
||||
process.props['ro.product.model'] = adb.get_property('ro.product.model')
|
||||
process.props['ro.product.name'] = adb.get_property('ro.product.name')
|
||||
process.props['ro.product.manufacturer'] = adb.get_property('ro.product.manufacturer')
|
||||
|
||||
|
||||
def main():
|
||||
# Allow deep callchain with length >1000.
|
||||
sys.setrecursionlimit(1500)
|
||||
parser = argparse.ArgumentParser(description="""Report samples in perf.data. Default option
|
||||
is: "-np surfaceflinger -f 6000 -t 10".""")
|
||||
record_group = parser.add_argument_group('Record options')
|
||||
record_group.add_argument('-du', '--dwarf_unwinding', action='store_true', help="""Perform
|
||||
unwinding using dwarf instead of fp.""")
|
||||
record_group.add_argument('-e', '--events', default="", help="""Sample based on event
|
||||
occurences instead of frequency. Format expected is
|
||||
"event_counts event_name". e.g: "10000 cpu-cyles". A few examples
|
||||
of event_name: cpu-cycles, cache-references, cache-misses,
|
||||
branch-instructions, branch-misses""")
|
||||
record_group.add_argument('-f', '--sample_frequency', type=int, default=6000, help="""Sample
|
||||
frequency""")
|
||||
record_group.add_argument('--compile_java_code', action='store_true',
|
||||
help="""On Android N and Android O, we need to compile Java code
|
||||
into native instructions to profile Java code. Android O
|
||||
also needs wrap.sh in the apk to use the native
|
||||
instructions.""")
|
||||
record_group.add_argument('-np', '--native_program', default="surfaceflinger", help="""Profile
|
||||
a native program. The program should be running on the device.
|
||||
Like -np surfaceflinger.""")
|
||||
record_group.add_argument('-p', '--app', help="""Profile an Android app, given the package
|
||||
name. Like -p com.example.android.myapp.""")
|
||||
record_group.add_argument('--record_file', default='perf.data', help='Default is perf.data.')
|
||||
record_group.add_argument('-sc', '--skip_collection', action='store_true', help="""Skip data
|
||||
collection""")
|
||||
record_group.add_argument('-t', '--capture_duration', type=int, default=10, help="""Capture
|
||||
duration in seconds.""")
|
||||
|
||||
report_group = parser.add_argument_group('Report options')
|
||||
report_group.add_argument('-c', '--color', default='hot', choices=['hot', 'dso', 'legacy'],
|
||||
help="""Color theme: hot=percentage of samples, dso=callsite DSO
|
||||
name, legacy=brendan style""")
|
||||
report_group.add_argument('--embedded_flamegraph', action='store_true', help="""Generate
|
||||
embedded flamegraph.""")
|
||||
report_group.add_argument('--kallsyms', help='Set the path to find kernel symbols.')
|
||||
report_group.add_argument('--min_callchain_percentage', default=0.01, type=float, help="""
|
||||
Set min percentage of callchains shown in the report.
|
||||
It is used to limit nodes shown in the flamegraph. For example,
|
||||
when set to 0.01, only callchains taking >= 0.01%% of the event
|
||||
count of the owner thread are collected in the report.""")
|
||||
report_group.add_argument('--no_browser', action='store_true', help="""Don't open report
|
||||
in browser.""")
|
||||
report_group.add_argument('-o', '--report_path', default='report.html', help="""Set report
|
||||
path.""")
|
||||
report_group.add_argument('--one-flamegraph', action='store_true', help="""Generate one
|
||||
flamegraph instead of one for each thread.""")
|
||||
report_group.add_argument('--symfs', help="""Set the path to find binaries with symbols and
|
||||
debug info.""")
|
||||
report_group.add_argument('--title', help='Show a title in the report.')
|
||||
report_group.add_argument('--show_art_frames', action='store_true',
|
||||
help='Show frames of internal methods in the ART Java interpreter.')
|
||||
|
||||
debug_group = parser.add_argument_group('Debug options')
|
||||
debug_group.add_argument('--disable_adb_root', action='store_true', help="""Force adb to run
|
||||
in non root mode.""")
|
||||
args = parser.parse_args()
|
||||
process = Process("", 0)
|
||||
|
||||
if not args.skip_collection:
|
||||
process.name = args.app or args.native_program
|
||||
log_info("Starting data collection stage for process '%s'." % process.name)
|
||||
if not collect_data(args):
|
||||
log_exit("Unable to collect data.")
|
||||
result, output = AdbHelper().run_and_return_output(['shell', 'pidof', process.name])
|
||||
if result:
|
||||
try:
|
||||
process.pid = int(output)
|
||||
except:
|
||||
process.pid = 0
|
||||
collect_machine_info(process)
|
||||
else:
|
||||
args.capture_duration = 0
|
||||
|
||||
sample_filter_fn = None
|
||||
if args.one_flamegraph:
|
||||
def filter_fn(sample, symbol, callchain):
|
||||
sample.pid = sample.tid = process.pid
|
||||
return True
|
||||
sample_filter_fn = filter_fn
|
||||
if not args.title:
|
||||
args.title = ''
|
||||
args.title += '(One Flamegraph)'
|
||||
|
||||
parse_samples(process, args, sample_filter_fn)
|
||||
generate_threads_offsets(process)
|
||||
report_path = output_report(process, args)
|
||||
if not args.no_browser:
|
||||
open_report_in_browser(report_path)
|
||||
|
||||
log_info("Flamegraph generated at '%s'." % report_path)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,274 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2017 The Android Open Source Project
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
'use strict';
|
||||
|
||||
function flamegraphInit() {
|
||||
let flamegraph = document.getElementById('flamegraph_id');
|
||||
let svgs = flamegraph.getElementsByTagName('svg');
|
||||
for (let i = 0; i < svgs.length; ++i) {
|
||||
createZoomHistoryStack(svgs[i]);
|
||||
adjust_text_size(svgs[i]);
|
||||
}
|
||||
|
||||
function throttle(callback) {
|
||||
let running = false;
|
||||
return function() {
|
||||
if (!running) {
|
||||
running = true;
|
||||
window.requestAnimationFrame(function () {
|
||||
callback();
|
||||
running = false;
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
window.addEventListener('resize', throttle(function() {
|
||||
let flamegraph = document.getElementById('flamegraph_id');
|
||||
let svgs = flamegraph.getElementsByTagName('svg');
|
||||
for (let i = 0; i < svgs.length; ++i) {
|
||||
adjust_text_size(svgs[i]);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
// Create a stack add the root svg element in it.
|
||||
function createZoomHistoryStack(svgElement) {
|
||||
svgElement.zoomStack = [svgElement.getElementById(svgElement.attributes['rootid'].value)];
|
||||
}
|
||||
|
||||
function adjust_node_text_size(x, svgWidth) {
|
||||
let title = x.getElementsByTagName('title')[0];
|
||||
let text = x.getElementsByTagName('text')[0];
|
||||
let rect = x.getElementsByTagName('rect')[0];
|
||||
|
||||
let width = parseFloat(rect.attributes['width'].value) * svgWidth * 0.01;
|
||||
|
||||
// Don't even bother trying to find a best fit. The area is too small.
|
||||
if (width < 28) {
|
||||
text.textContent = '';
|
||||
return;
|
||||
}
|
||||
// Remove dso and #samples which are here only for mouseover purposes.
|
||||
let methodName = title.textContent.split(' | ')[0];
|
||||
|
||||
let numCharacters;
|
||||
for (numCharacters = methodName.length; numCharacters > 4; numCharacters--) {
|
||||
// Avoid reflow by using hard-coded estimate instead of
|
||||
// text.getSubStringLength(0, numCharacters).
|
||||
if (numCharacters * 7.5 <= width) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (numCharacters == methodName.length) {
|
||||
text.textContent = methodName;
|
||||
return;
|
||||
}
|
||||
|
||||
text.textContent = methodName.substring(0, numCharacters-2) + '..';
|
||||
}
|
||||
|
||||
function adjust_text_size(svgElement) {
|
||||
let svgWidth = window.innerWidth;
|
||||
let x = svgElement.getElementsByTagName('g');
|
||||
for (let i = 0; i < x.length; i++) {
|
||||
adjust_node_text_size(x[i], svgWidth);
|
||||
}
|
||||
}
|
||||
|
||||
function zoom(e) {
|
||||
let svgElement = e.ownerSVGElement;
|
||||
let zoomStack = svgElement.zoomStack;
|
||||
zoomStack.push(e);
|
||||
displaySVGElement(svgElement);
|
||||
select(e);
|
||||
|
||||
// Show zoom out button.
|
||||
svgElement.getElementById('zoom_rect').style.display = 'block';
|
||||
svgElement.getElementById('zoom_text').style.display = 'block';
|
||||
}
|
||||
|
||||
function displaySVGElement(svgElement) {
|
||||
let zoomStack = svgElement.zoomStack;
|
||||
let e = zoomStack[zoomStack.length - 1];
|
||||
let clicked_rect = e.getElementsByTagName('rect')[0];
|
||||
let clicked_origin_x;
|
||||
let clicked_origin_y = clicked_rect.attributes['oy'].value;
|
||||
let clicked_origin_width;
|
||||
|
||||
if (zoomStack.length == 1) {
|
||||
// Show all nodes when zoomStack only contains the root node.
|
||||
// This is needed to show flamegraph containing more than one node at the root level.
|
||||
clicked_origin_x = 0;
|
||||
clicked_origin_width = 100;
|
||||
} else {
|
||||
clicked_origin_x = clicked_rect.attributes['ox'].value;
|
||||
clicked_origin_width = clicked_rect.attributes['owidth'].value;
|
||||
}
|
||||
|
||||
|
||||
let svgBox = svgElement.getBoundingClientRect();
|
||||
let svgBoxHeight = svgBox.height;
|
||||
let svgBoxWidth = 100;
|
||||
let scaleFactor = svgBoxWidth / clicked_origin_width;
|
||||
|
||||
let callsites = svgElement.getElementsByTagName('g');
|
||||
for (let i = 0; i < callsites.length; i++) {
|
||||
let text = callsites[i].getElementsByTagName('text')[0];
|
||||
let rect = callsites[i].getElementsByTagName('rect')[0];
|
||||
|
||||
let rect_o_x = parseFloat(rect.attributes['ox'].value);
|
||||
let rect_o_y = parseFloat(rect.attributes['oy'].value);
|
||||
|
||||
// Avoid multiple forced reflow by hiding nodes.
|
||||
if (rect_o_y > clicked_origin_y) {
|
||||
rect.style.display = 'none';
|
||||
text.style.display = 'none';
|
||||
continue;
|
||||
}
|
||||
rect.style.display = 'block';
|
||||
text.style.display = 'block';
|
||||
|
||||
let newrec_x = rect.attributes['x'].value = (rect_o_x - clicked_origin_x) * scaleFactor +
|
||||
'%';
|
||||
let newrec_y = rect.attributes['y'].value = rect_o_y + (svgBoxHeight - clicked_origin_y
|
||||
- 17 - 2);
|
||||
|
||||
text.attributes['y'].value = newrec_y + 12;
|
||||
text.attributes['x'].value = newrec_x;
|
||||
|
||||
rect.attributes['width'].value = (rect.attributes['owidth'].value * scaleFactor) + '%';
|
||||
}
|
||||
|
||||
adjust_text_size(svgElement);
|
||||
}
|
||||
|
||||
function unzoom(e) {
|
||||
let svgOwner = e.ownerSVGElement;
|
||||
let stack = svgOwner.zoomStack;
|
||||
|
||||
// Unhighlight whatever was selected.
|
||||
if (selected) {
|
||||
selected.classList.remove('s');
|
||||
}
|
||||
|
||||
// Stack management: Never remove the last element which is the flamegraph root.
|
||||
if (stack.length > 1) {
|
||||
let previouslySelected = stack.pop();
|
||||
select(previouslySelected);
|
||||
}
|
||||
|
||||
// Hide zoom out button.
|
||||
if (stack.length == 1) {
|
||||
svgOwner.getElementById('zoom_rect').style.display = 'none';
|
||||
svgOwner.getElementById('zoom_text').style.display = 'none';
|
||||
}
|
||||
|
||||
displaySVGElement(svgOwner);
|
||||
}
|
||||
|
||||
function search(e) {
|
||||
let term = prompt('Search for:', '');
|
||||
let callsites = e.ownerSVGElement.getElementsByTagName('g');
|
||||
|
||||
if (!term) {
|
||||
for (let i = 0; i < callsites.length; i++) {
|
||||
let rect = callsites[i].getElementsByTagName('rect')[0];
|
||||
rect.attributes['fill'].value = rect.attributes['ofill'].value;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
for (let i = 0; i < callsites.length; i++) {
|
||||
let title = callsites[i].getElementsByTagName('title')[0];
|
||||
let rect = callsites[i].getElementsByTagName('rect')[0];
|
||||
if (title.textContent.indexOf(term) != -1) {
|
||||
rect.attributes['fill'].value = 'rgb(230,100,230)';
|
||||
} else {
|
||||
rect.attributes['fill'].value = rect.attributes['ofill'].value;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let selected;
|
||||
document.addEventListener('keydown', (e) => {
|
||||
if (!selected) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let nav = selected.attributes['nav'].value.split(',');
|
||||
let navigation_index;
|
||||
switch (e.keyCode) {
|
||||
// case 38: // ARROW UP
|
||||
case 87: navigation_index = 0; break; // W
|
||||
|
||||
// case 32 : // ARROW LEFT
|
||||
case 65: navigation_index = 1; break; // A
|
||||
|
||||
// case 43: // ARROW DOWN
|
||||
case 68: navigation_index = 3; break; // S
|
||||
|
||||
// case 39: // ARROW RIGHT
|
||||
case 83: navigation_index = 2; break; // D
|
||||
|
||||
case 32: zoom(selected); return false; // SPACE
|
||||
|
||||
case 8: // BACKSPACE
|
||||
unzoom(selected); return false;
|
||||
default: return true;
|
||||
}
|
||||
|
||||
if (nav[navigation_index] == '0') {
|
||||
return false;
|
||||
}
|
||||
|
||||
let target_element = selected.ownerSVGElement.getElementById(nav[navigation_index]);
|
||||
select(target_element);
|
||||
return false;
|
||||
});
|
||||
|
||||
function select(e) {
|
||||
if (selected) {
|
||||
selected.classList.remove('s');
|
||||
}
|
||||
selected = e;
|
||||
selected.classList.add('s');
|
||||
|
||||
// Update info bar
|
||||
let titleElement = selected.getElementsByTagName('title')[0];
|
||||
let text = titleElement.textContent;
|
||||
|
||||
// Parse title
|
||||
let method_and_info = text.split(' | ');
|
||||
let methodName = method_and_info[0];
|
||||
let info = method_and_info[1];
|
||||
|
||||
// Parse info
|
||||
// '/system/lib64/libhwbinder.so (4 events: 0.28%)'
|
||||
let regexp = /(.*) \((.*)\)/g;
|
||||
let match = regexp.exec(info);
|
||||
if (match.length > 2) {
|
||||
let percentage = match[2];
|
||||
// Write percentage
|
||||
let percentageTextElement = selected.ownerSVGElement.getElementById('percent_text');
|
||||
percentageTextElement.textContent = percentage;
|
||||
// console.log("'" + percentage + "'")
|
||||
}
|
||||
|
||||
// Set fields
|
||||
let barTextElement = selected.ownerSVGElement.getElementById('info_text');
|
||||
barTextElement.textContent = methodName;
|
||||
}
|
|
@ -1,204 +0,0 @@
|
|||
#
|
||||
# Copyright (C) 2016 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import sys
|
||||
|
||||
SVG_NODE_HEIGHT = 17
|
||||
FONT_SIZE = 12
|
||||
|
||||
UNZOOM_NODE_ORIGIN_X = 10
|
||||
UNZOOM_NODE_WIDTH = 80
|
||||
INFO_NODE_ORIGIN_X = 120
|
||||
INFO_NODE_WIDTH = 800
|
||||
PERCENT_NODE_ORIGIN_X = 930
|
||||
PERCENT_NODE_WIDTH = 250
|
||||
SEARCH_NODE_ORIGIN_X = 1190
|
||||
SEARCH_NODE_WIDTH = 80
|
||||
RECT_TEXT_PADDING = 10
|
||||
|
||||
|
||||
def hash_to_float(string):
|
||||
return hash(string) / float(sys.maxsize)
|
||||
|
||||
|
||||
def getLegacyColor(method):
|
||||
r = 175 + int(50 * hash_to_float(reversed(method)))
|
||||
g = 60 + int(180 * hash_to_float(method))
|
||||
b = 60 + int(55 * hash_to_float(reversed(method)))
|
||||
return (r, g, b)
|
||||
|
||||
|
||||
def getDSOColor(method):
|
||||
r = 170 + int(80 * hash_to_float(reversed(method)))
|
||||
g = 180 + int(70 * hash_to_float((method)))
|
||||
b = 170 + int(80 * hash_to_float(reversed(method)))
|
||||
return (r, g, b)
|
||||
|
||||
|
||||
def getHeatColor(callsite, total_weight):
|
||||
r = 245 + 10 * (1 - callsite.weight() / total_weight)
|
||||
g = 110 + 105 * (1 - callsite.weight() / total_weight)
|
||||
b = 100
|
||||
return (r, g, b)
|
||||
|
||||
def get_proper_scaled_time_string(value):
|
||||
if value >= 1e9:
|
||||
return '%.3f s' % (value / 1e9)
|
||||
if value >= 1e6:
|
||||
return '%.3f ms' % (value / 1e6)
|
||||
if value >= 1e3:
|
||||
return '%.3f us' % (value / 1e3)
|
||||
return '%.0f ns' % value
|
||||
|
||||
def createSVGNode(process, callsite, depth, f, total_weight, height, color_scheme, nav):
|
||||
x = float(callsite.offset) / total_weight * 100
|
||||
y = height - (depth + 1) * SVG_NODE_HEIGHT
|
||||
width = callsite.weight() / total_weight * 100
|
||||
|
||||
method = callsite.method.replace(">", ">").replace("<", "<")
|
||||
if width <= 0:
|
||||
return
|
||||
|
||||
if color_scheme == "dso":
|
||||
r, g, b = getDSOColor(callsite.dso)
|
||||
elif color_scheme == "legacy":
|
||||
r, g, b = getLegacyColor(method)
|
||||
else:
|
||||
r, g, b = getHeatColor(callsite, total_weight)
|
||||
|
||||
r_border, g_border, b_border = [max(0, color - 50) for color in [r, g, b]]
|
||||
|
||||
if process.props['trace_offcpu']:
|
||||
weight_str = get_proper_scaled_time_string(callsite.weight())
|
||||
else:
|
||||
weight_str = "{:,}".format(int(callsite.weight())) + ' events'
|
||||
|
||||
f.write(
|
||||
"""<g id=%d class="n" onclick="zoom(this);" onmouseenter="select(this);" nav="%s">
|
||||
<title>%s | %s (%s: %3.2f%%)</title>
|
||||
<rect x="%f%%" y="%f" ox="%f" oy="%f" width="%f%%" owidth="%f" height="15.0"
|
||||
ofill="rgb(%d,%d,%d)" fill="rgb(%d,%d,%d)" style="stroke:rgb(%d,%d,%d)"/>
|
||||
<text x="%f%%" y="%f" font-size="%d" font-family="Monospace"></text>
|
||||
</g>""" %
|
||||
(callsite.id,
|
||||
','.join(str(x) for x in nav),
|
||||
method,
|
||||
callsite.dso,
|
||||
weight_str,
|
||||
callsite.weight() / total_weight * 100,
|
||||
x,
|
||||
y,
|
||||
x,
|
||||
y,
|
||||
width,
|
||||
width,
|
||||
r,
|
||||
g,
|
||||
b,
|
||||
r,
|
||||
g,
|
||||
b,
|
||||
r_border,
|
||||
g_border,
|
||||
b_border,
|
||||
x,
|
||||
y + 12,
|
||||
FONT_SIZE))
|
||||
|
||||
|
||||
def renderSVGNodes(process, flamegraph, depth, f, total_weight, height, color_scheme):
|
||||
for i, child in enumerate(flamegraph.children):
|
||||
# Prebuild navigation target for wasd
|
||||
|
||||
if i == 0:
|
||||
left_index = 0
|
||||
else:
|
||||
left_index = flamegraph.children[i - 1].id
|
||||
|
||||
if i == len(flamegraph.children) - 1:
|
||||
right_index = 0
|
||||
else:
|
||||
right_index = flamegraph.children[i + 1].id
|
||||
|
||||
up_index = max(child.children, key=lambda x: x.weight()).id if child.children else 0
|
||||
|
||||
# up, left, down, right
|
||||
nav = [up_index, left_index, flamegraph.id, right_index]
|
||||
|
||||
createSVGNode(process, child, depth, f, total_weight, height, color_scheme, nav)
|
||||
# Recurse down
|
||||
renderSVGNodes(process, child, depth + 1, f, total_weight, height, color_scheme)
|
||||
|
||||
|
||||
def renderSearchNode(f):
|
||||
f.write(
|
||||
"""<rect id="search_rect" style="stroke:rgb(0,0,0);" onclick="search(this);" class="t"
|
||||
rx="10" ry="10" x="%d" y="10" width="%d" height="30" fill="rgb(255,255,255)""/>
|
||||
<text id="search_text" class="t" x="%d" y="30" onclick="search(this);">Search</text>
|
||||
""" % (SEARCH_NODE_ORIGIN_X, SEARCH_NODE_WIDTH, SEARCH_NODE_ORIGIN_X + RECT_TEXT_PADDING))
|
||||
|
||||
|
||||
def renderUnzoomNode(f):
|
||||
f.write(
|
||||
"""<rect id="zoom_rect" style="display:none;stroke:rgb(0,0,0);" class="t"
|
||||
onclick="unzoom(this);" rx="10" ry="10" x="%d" y="10" width="%d" height="30"
|
||||
fill="rgb(255,255,255)"/>
|
||||
<text id="zoom_text" style="display:none;" class="t" x="%d" y="30"
|
||||
onclick="unzoom(this);">Zoom out</text>
|
||||
""" % (UNZOOM_NODE_ORIGIN_X, UNZOOM_NODE_WIDTH, UNZOOM_NODE_ORIGIN_X + RECT_TEXT_PADDING))
|
||||
|
||||
|
||||
def renderInfoNode(f):
|
||||
f.write(
|
||||
"""<clipPath id="info_clip_path"> <rect id="info_rect" style="stroke:rgb(0,0,0);"
|
||||
rx="10" ry="10" x="%d" y="10" width="%d" height="30" fill="rgb(255,255,255)"/>
|
||||
</clipPath>
|
||||
<rect id="info_rect" style="stroke:rgb(0,0,0);"
|
||||
rx="10" ry="10" x="%d" y="10" width="%d" height="30" fill="rgb(255,255,255)"/>
|
||||
<text clip-path="url(#info_clip_path)" id="info_text" x="%d" y="30"></text>
|
||||
""" % (INFO_NODE_ORIGIN_X, INFO_NODE_WIDTH, INFO_NODE_ORIGIN_X, INFO_NODE_WIDTH,
|
||||
INFO_NODE_ORIGIN_X + RECT_TEXT_PADDING))
|
||||
|
||||
|
||||
def renderPercentNode(f):
|
||||
f.write(
|
||||
"""<rect id="percent_rect" style="stroke:rgb(0,0,0);"
|
||||
rx="10" ry="10" x="%d" y="10" width="%d" height="30" fill="rgb(255,255,255)"/>
|
||||
<text id="percent_text" text-anchor="end" x="%d" y="30">100.00%%</text>
|
||||
""" % (PERCENT_NODE_ORIGIN_X, PERCENT_NODE_WIDTH,
|
||||
PERCENT_NODE_ORIGIN_X + PERCENT_NODE_WIDTH - RECT_TEXT_PADDING))
|
||||
|
||||
|
||||
def renderSVG(process, flamegraph, f, color_scheme):
|
||||
height = (flamegraph.get_max_depth() + 2) * SVG_NODE_HEIGHT
|
||||
f.write("""<div class="flamegraph_block" style="width:100%%; height:%dpx;">
|
||||
""" % height)
|
||||
f.write("""<svg xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1"
|
||||
width="100%%" height="100%%" style="border: 1px solid black;"
|
||||
rootid="%d">
|
||||
""" % (flamegraph.children[0].id))
|
||||
f.write("""<defs > <linearGradient id="background_gradiant" y1="0" y2="1" x1="0" x2="0" >
|
||||
<stop stop-color="#eeeeee" offset="5%" /> <stop stop-color="#efefb1" offset="90%" />
|
||||
</linearGradient> </defs>""")
|
||||
f.write("""<rect x="0.0" y="0" width="100%" height="100%" fill="url(#background_gradiant)" />
|
||||
""")
|
||||
renderSVGNodes(process, flamegraph, 0, f, flamegraph.weight(), height, color_scheme)
|
||||
renderSearchNode(f)
|
||||
renderUnzoomNode(f)
|
||||
renderInfoNode(f)
|
||||
renderPercentNode(f)
|
||||
f.write("</svg></div><br/>\n\n")
|
File diff suppressed because it is too large
Load Diff
|
@ -1,930 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (C) 2017 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import argparse
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from simpleperf_report_lib import ReportLib
|
||||
from utils import *
|
||||
|
||||
|
||||
class HtmlWriter(object):
|
||||
|
||||
def __init__(self, output_path):
|
||||
self.fh = open(output_path, 'w')
|
||||
self.tag_stack = []
|
||||
|
||||
def close(self):
|
||||
self.fh.close()
|
||||
|
||||
def open_tag(self, tag, **attrs):
|
||||
attr_str = ''
|
||||
for key in attrs:
|
||||
attr_str += ' %s="%s"' % (key, attrs[key])
|
||||
self.fh.write('<%s%s>' % (tag, attr_str))
|
||||
self.tag_stack.append(tag)
|
||||
return self
|
||||
|
||||
def close_tag(self, tag=None):
|
||||
if tag:
|
||||
assert tag == self.tag_stack[-1]
|
||||
self.fh.write('</%s>\n' % self.tag_stack.pop())
|
||||
|
||||
def add(self, text):
|
||||
self.fh.write(text)
|
||||
return self
|
||||
|
||||
def add_file(self, file_path):
|
||||
file_path = os.path.join(get_script_dir(), file_path)
|
||||
with open(file_path, 'r') as f:
|
||||
self.add(f.read())
|
||||
return self
|
||||
|
||||
def modify_text_for_html(text):
|
||||
return text.replace('>', '>').replace('<', '<')
|
||||
|
||||
class EventScope(object):
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
self.processes = {} # map from pid to ProcessScope
|
||||
self.sample_count = 0
|
||||
self.event_count = 0
|
||||
|
||||
def get_process(self, pid):
|
||||
process = self.processes.get(pid)
|
||||
if not process:
|
||||
process = self.processes[pid] = ProcessScope(pid)
|
||||
return process
|
||||
|
||||
def get_sample_info(self, gen_addr_hit_map):
|
||||
result = {}
|
||||
result['eventName'] = self.name
|
||||
result['eventCount'] = self.event_count
|
||||
result['processes'] = [process.get_sample_info(gen_addr_hit_map)
|
||||
for process in self.processes.values()]
|
||||
return result
|
||||
|
||||
|
||||
class ProcessScope(object):
|
||||
|
||||
def __init__(self, pid):
|
||||
self.pid = pid
|
||||
self.name = ''
|
||||
self.event_count = 0
|
||||
self.threads = {} # map from tid to ThreadScope
|
||||
|
||||
def get_thread(self, tid, thread_name):
|
||||
thread = self.threads.get(tid)
|
||||
if not thread:
|
||||
thread = self.threads[tid] = ThreadScope(tid)
|
||||
thread.name = thread_name
|
||||
if self.pid == tid:
|
||||
self.name = thread_name
|
||||
return thread
|
||||
|
||||
def get_sample_info(self, gen_addr_hit_map):
|
||||
result = {}
|
||||
result['pid'] = self.pid
|
||||
result['eventCount'] = self.event_count
|
||||
result['threads'] = [thread.get_sample_info(gen_addr_hit_map)
|
||||
for thread in self.threads.values()]
|
||||
return result
|
||||
|
||||
|
||||
class ThreadScope(object):
|
||||
|
||||
def __init__(self, tid):
|
||||
self.tid = tid
|
||||
self.name = ''
|
||||
self.event_count = 0
|
||||
self.libs = {} # map from lib_id to LibScope
|
||||
|
||||
def add_callstack(self, event_count, callstack, build_addr_hit_map):
|
||||
""" callstack is a list of tuple (lib_id, func_id, addr).
|
||||
For each i > 0, callstack[i] calls callstack[i-1]."""
|
||||
hit_func_ids = set()
|
||||
for i in range(len(callstack)):
|
||||
lib_id, func_id, addr = callstack[i]
|
||||
# When a callstack contains recursive function, only add for each function once.
|
||||
if func_id in hit_func_ids:
|
||||
continue
|
||||
hit_func_ids.add(func_id)
|
||||
|
||||
lib = self.libs.get(lib_id)
|
||||
if not lib:
|
||||
lib = self.libs[lib_id] = LibScope(lib_id)
|
||||
function = lib.get_function(func_id)
|
||||
if i == 0:
|
||||
lib.event_count += event_count
|
||||
function.sample_count += 1
|
||||
function.add_reverse_callchain(callstack, i + 1, len(callstack), event_count)
|
||||
|
||||
if build_addr_hit_map:
|
||||
function.build_addr_hit_map(addr, event_count if i == 0 else 0, event_count)
|
||||
|
||||
hit_func_ids.clear()
|
||||
for i in range(len(callstack) - 1, -1, -1):
|
||||
lib_id, func_id, _ = callstack[i]
|
||||
# When a callstack contains recursive function, only add for each function once.
|
||||
if func_id in hit_func_ids:
|
||||
continue
|
||||
hit_func_ids.add(func_id)
|
||||
lib = self.libs.get(lib_id)
|
||||
lib.get_function(func_id).add_callchain(callstack, i - 1, -1, event_count)
|
||||
|
||||
def get_sample_info(self, gen_addr_hit_map):
|
||||
result = {}
|
||||
result['tid'] = self.tid
|
||||
result['eventCount'] = self.event_count
|
||||
result['libs'] = [lib.gen_sample_info(gen_addr_hit_map)
|
||||
for lib in self.libs.values()]
|
||||
return result
|
||||
|
||||
|
||||
class LibScope(object):
|
||||
|
||||
def __init__(self, lib_id):
|
||||
self.lib_id = lib_id
|
||||
self.event_count = 0
|
||||
self.functions = {} # map from func_id to FunctionScope.
|
||||
|
||||
def get_function(self, func_id):
|
||||
function = self.functions.get(func_id)
|
||||
if not function:
|
||||
function = self.functions[func_id] = FunctionScope(func_id)
|
||||
return function
|
||||
|
||||
def gen_sample_info(self, gen_addr_hit_map):
|
||||
result = {}
|
||||
result['libId'] = self.lib_id
|
||||
result['eventCount'] = self.event_count
|
||||
result['functions'] = [func.gen_sample_info(gen_addr_hit_map)
|
||||
for func in self.functions.values()]
|
||||
return result
|
||||
|
||||
|
||||
class FunctionScope(object):
|
||||
|
||||
def __init__(self, func_id):
|
||||
self.sample_count = 0
|
||||
self.call_graph = CallNode(func_id)
|
||||
self.reverse_call_graph = CallNode(func_id)
|
||||
self.addr_hit_map = None # map from addr to [event_count, subtree_event_count].
|
||||
# map from (source_file_id, line) to [event_count, subtree_event_count].
|
||||
self.line_hit_map = None
|
||||
|
||||
def add_callchain(self, callchain, start, end, event_count):
|
||||
node = self.call_graph
|
||||
for i in range(start, end, -1):
|
||||
node = node.get_child(callchain[i][1])
|
||||
node.event_count += event_count
|
||||
|
||||
def add_reverse_callchain(self, callchain, start, end, event_count):
|
||||
node = self.reverse_call_graph
|
||||
for i in range(start, end):
|
||||
node = node.get_child(callchain[i][1])
|
||||
node.event_count += event_count
|
||||
|
||||
def build_addr_hit_map(self, addr, event_count, subtree_event_count):
|
||||
if self.addr_hit_map is None:
|
||||
self.addr_hit_map = {}
|
||||
count_info = self.addr_hit_map.get(addr)
|
||||
if count_info is None:
|
||||
self.addr_hit_map[addr] = [event_count, subtree_event_count]
|
||||
else:
|
||||
count_info[0] += event_count
|
||||
count_info[1] += subtree_event_count
|
||||
|
||||
def build_line_hit_map(self, source_file_id, line, event_count, subtree_event_count):
|
||||
if self.line_hit_map is None:
|
||||
self.line_hit_map = {}
|
||||
key = (source_file_id, line)
|
||||
count_info = self.line_hit_map.get(key)
|
||||
if count_info is None:
|
||||
self.line_hit_map[key] = [event_count, subtree_event_count]
|
||||
else:
|
||||
count_info[0] += event_count
|
||||
count_info[1] += subtree_event_count
|
||||
|
||||
def update_subtree_event_count(self):
|
||||
a = self.call_graph.update_subtree_event_count()
|
||||
b = self.reverse_call_graph.update_subtree_event_count()
|
||||
return max(a, b)
|
||||
|
||||
def limit_callchain_percent(self, min_callchain_percent, hit_func_ids):
|
||||
min_limit = min_callchain_percent * 0.01 * self.call_graph.subtree_event_count
|
||||
self.call_graph.cut_edge(min_limit, hit_func_ids)
|
||||
self.reverse_call_graph.cut_edge(min_limit, hit_func_ids)
|
||||
|
||||
def gen_sample_info(self, gen_addr_hit_map):
|
||||
result = {}
|
||||
result['c'] = self.sample_count
|
||||
result['g'] = self.call_graph.gen_sample_info()
|
||||
result['rg'] = self.reverse_call_graph.gen_sample_info()
|
||||
if self.line_hit_map:
|
||||
items = []
|
||||
for key in self.line_hit_map:
|
||||
count_info = self.line_hit_map[key]
|
||||
item = {'f': key[0], 'l': key[1], 'e': count_info[0], 's': count_info[1]}
|
||||
items.append(item)
|
||||
result['s'] = items
|
||||
if gen_addr_hit_map and self.addr_hit_map:
|
||||
items = []
|
||||
for addr in sorted(self.addr_hit_map):
|
||||
count_info = self.addr_hit_map[addr]
|
||||
items.append({'a': addr, 'e': count_info[0], 's': count_info[1]})
|
||||
result['a'] = items
|
||||
return result
|
||||
|
||||
|
||||
class CallNode(object):
|
||||
|
||||
def __init__(self, func_id):
|
||||
self.event_count = 0
|
||||
self.subtree_event_count = 0
|
||||
self.func_id = func_id
|
||||
self.children = {} # map from func_id to CallNode
|
||||
|
||||
def get_child(self, func_id):
|
||||
child = self.children.get(func_id)
|
||||
if not child:
|
||||
child = self.children[func_id] = CallNode(func_id)
|
||||
return child
|
||||
|
||||
def update_subtree_event_count(self):
|
||||
self.subtree_event_count = self.event_count
|
||||
for child in self.children.values():
|
||||
self.subtree_event_count += child.update_subtree_event_count()
|
||||
return self.subtree_event_count
|
||||
|
||||
def cut_edge(self, min_limit, hit_func_ids):
|
||||
hit_func_ids.add(self.func_id)
|
||||
to_del_children = []
|
||||
for key in self.children:
|
||||
child = self.children[key]
|
||||
if child.subtree_event_count < min_limit:
|
||||
to_del_children.append(key)
|
||||
else:
|
||||
child.cut_edge(min_limit, hit_func_ids)
|
||||
for key in to_del_children:
|
||||
del self.children[key]
|
||||
|
||||
def gen_sample_info(self):
|
||||
result = {}
|
||||
result['e'] = self.event_count
|
||||
result['s'] = self.subtree_event_count
|
||||
result['f'] = self.func_id
|
||||
result['c'] = [child.gen_sample_info() for child in self.children.values()]
|
||||
return result
|
||||
|
||||
|
||||
class LibSet(object):
|
||||
""" Collection of shared libraries used in perf.data. """
|
||||
def __init__(self):
|
||||
self.lib_name_to_id = {}
|
||||
self.lib_id_to_name = []
|
||||
|
||||
def get_lib_id(self, lib_name):
|
||||
lib_id = self.lib_name_to_id.get(lib_name)
|
||||
if lib_id is None:
|
||||
lib_id = len(self.lib_id_to_name)
|
||||
self.lib_name_to_id[lib_name] = lib_id
|
||||
self.lib_id_to_name.append(lib_name)
|
||||
return lib_id
|
||||
|
||||
def get_lib_name(self, lib_id):
|
||||
return self.lib_id_to_name[lib_id]
|
||||
|
||||
|
||||
class Function(object):
|
||||
""" Represent a function in a shared library. """
|
||||
def __init__(self, lib_id, func_name, func_id, start_addr, addr_len):
|
||||
self.lib_id = lib_id
|
||||
self.func_name = func_name
|
||||
self.func_id = func_id
|
||||
self.start_addr = start_addr
|
||||
self.addr_len = addr_len
|
||||
self.source_info = None
|
||||
self.disassembly = None
|
||||
|
||||
|
||||
class FunctionSet(object):
|
||||
""" Collection of functions used in perf.data. """
|
||||
def __init__(self):
|
||||
self.name_to_func = {}
|
||||
self.id_to_func = {}
|
||||
|
||||
def get_func_id(self, lib_id, symbol):
|
||||
key = (lib_id, symbol.symbol_name)
|
||||
function = self.name_to_func.get(key)
|
||||
if function is None:
|
||||
func_id = len(self.id_to_func)
|
||||
function = Function(lib_id, symbol.symbol_name, func_id, symbol.symbol_addr,
|
||||
symbol.symbol_len)
|
||||
self.name_to_func[key] = function
|
||||
self.id_to_func[func_id] = function
|
||||
return function.func_id
|
||||
|
||||
def trim_functions(self, left_func_ids):
|
||||
""" Remove functions excepts those in left_func_ids. """
|
||||
for function in self.name_to_func.values():
|
||||
if function.func_id not in left_func_ids:
|
||||
del self.id_to_func[function.func_id]
|
||||
# name_to_func will not be used.
|
||||
self.name_to_func = None
|
||||
|
||||
|
||||
class SourceFile(object):
|
||||
""" A source file containing source code hit by samples. """
|
||||
def __init__(self, file_id, abstract_path):
|
||||
self.file_id = file_id
|
||||
self.abstract_path = abstract_path # path reported by addr2line
|
||||
self.real_path = None # file path in the file system
|
||||
self.requested_lines = set()
|
||||
self.line_to_code = {} # map from line to code in that line.
|
||||
|
||||
def request_lines(self, start_line, end_line):
|
||||
self.requested_lines |= set(range(start_line, end_line + 1))
|
||||
|
||||
def add_source_code(self, real_path):
|
||||
self.real_path = real_path
|
||||
with open(real_path, 'r') as f:
|
||||
source_code = f.readlines()
|
||||
max_line = len(source_code)
|
||||
for line in self.requested_lines:
|
||||
if line > 0 and line <= max_line:
|
||||
self.line_to_code[line] = source_code[line - 1]
|
||||
# requested_lines is no longer used.
|
||||
self.requested_lines = None
|
||||
|
||||
|
||||
class SourceFileSet(object):
|
||||
""" Collection of source files. """
|
||||
def __init__(self):
|
||||
self.path_to_source_files = {} # map from file path to SourceFile.
|
||||
|
||||
def get_source_file(self, file_path):
|
||||
source_file = self.path_to_source_files.get(file_path)
|
||||
if source_file is None:
|
||||
source_file = SourceFile(len(self.path_to_source_files), file_path)
|
||||
self.path_to_source_files[file_path] = source_file
|
||||
return source_file
|
||||
|
||||
def load_source_code(self, source_dirs):
|
||||
file_searcher = SourceFileSearcher(source_dirs)
|
||||
for source_file in self.path_to_source_files.values():
|
||||
real_path = file_searcher.get_real_path(source_file.abstract_path)
|
||||
if real_path:
|
||||
source_file.add_source_code(real_path)
|
||||
|
||||
|
||||
class SourceFileSearcher(object):
|
||||
|
||||
SOURCE_FILE_EXTS = {'.h', '.hh', '.H', '.hxx', '.hpp', '.h++',
|
||||
'.c', '.cc', '.C', '.cxx', '.cpp', '.c++',
|
||||
'.java', '.kt'}
|
||||
|
||||
@classmethod
|
||||
def is_source_filename(cls, filename):
|
||||
ext = os.path.splitext(filename)[1]
|
||||
return ext in cls.SOURCE_FILE_EXTS
|
||||
|
||||
"""" Find source file paths in the file system.
|
||||
The file paths reported by addr2line are the paths stored in debug sections
|
||||
of shared libraries. And we need to convert them to file paths in the file
|
||||
system. It is done in below steps:
|
||||
1. Collect all file paths under the provided source_dirs. The suffix of a
|
||||
source file should contain one of below:
|
||||
h: for C/C++ header files.
|
||||
c: for C/C++ source files.
|
||||
java: for Java source files.
|
||||
kt: for Kotlin source files.
|
||||
2. Given an abstract_path reported by addr2line, select the best real path
|
||||
as below:
|
||||
2.1 Find all real paths with the same file name as the abstract path.
|
||||
2.2 Select the real path having the longest common suffix with the abstract path.
|
||||
"""
|
||||
def __init__(self, source_dirs):
|
||||
# Map from filename to a list of reversed directory path containing filename.
|
||||
self.filename_to_rparents = {}
|
||||
self._collect_paths(source_dirs)
|
||||
|
||||
def _collect_paths(self, source_dirs):
|
||||
for source_dir in source_dirs:
|
||||
for parent, _, file_names in os.walk(source_dir):
|
||||
rparent = None
|
||||
for file_name in file_names:
|
||||
if self.is_source_filename(file_name):
|
||||
rparents = self.filename_to_rparents.get(file_name)
|
||||
if rparents is None:
|
||||
rparents = self.filename_to_rparents[file_name] = []
|
||||
if rparent is None:
|
||||
rparent = parent[::-1]
|
||||
rparents.append(rparent)
|
||||
|
||||
def get_real_path(self, abstract_path):
|
||||
abstract_path = abstract_path.replace('/', os.sep)
|
||||
abstract_parent, file_name = os.path.split(abstract_path)
|
||||
abstract_rparent = abstract_parent[::-1]
|
||||
real_rparents = self.filename_to_rparents.get(file_name)
|
||||
if real_rparents is None:
|
||||
return None
|
||||
best_matched_rparent = None
|
||||
best_common_length = -1
|
||||
for real_rparent in real_rparents:
|
||||
length = len(os.path.commonprefix((real_rparent, abstract_rparent)))
|
||||
if length > best_common_length:
|
||||
best_common_length = length
|
||||
best_matched_rparent = real_rparent
|
||||
if best_matched_rparent is None:
|
||||
return None
|
||||
return os.path.join(best_matched_rparent[::-1], file_name)
|
||||
|
||||
|
||||
class RecordData(object):
|
||||
|
||||
"""RecordData reads perf.data, and generates data used by report.js in json format.
|
||||
All generated items are listed as below:
|
||||
1. recordTime: string
|
||||
2. machineType: string
|
||||
3. androidVersion: string
|
||||
4. recordCmdline: string
|
||||
5. totalSamples: int
|
||||
6. processNames: map from pid to processName.
|
||||
7. threadNames: map from tid to threadName.
|
||||
8. libList: an array of libNames, indexed by libId.
|
||||
9. functionMap: map from functionId to funcData.
|
||||
funcData = {
|
||||
l: libId
|
||||
f: functionName
|
||||
s: [sourceFileId, startLine, endLine] [optional]
|
||||
d: [(disassembly, addr)] [optional]
|
||||
}
|
||||
|
||||
10. sampleInfo = [eventInfo]
|
||||
eventInfo = {
|
||||
eventName
|
||||
eventCount
|
||||
processes: [processInfo]
|
||||
}
|
||||
processInfo = {
|
||||
pid
|
||||
eventCount
|
||||
threads: [threadInfo]
|
||||
}
|
||||
threadInfo = {
|
||||
tid
|
||||
eventCount
|
||||
libs: [libInfo],
|
||||
}
|
||||
libInfo = {
|
||||
libId,
|
||||
eventCount,
|
||||
functions: [funcInfo]
|
||||
}
|
||||
funcInfo = {
|
||||
c: sampleCount
|
||||
g: callGraph
|
||||
rg: reverseCallgraph
|
||||
s: [sourceCodeInfo] [optional]
|
||||
a: [addrInfo] (sorted by addrInfo.addr) [optional]
|
||||
}
|
||||
callGraph and reverseCallGraph are both of type CallNode.
|
||||
callGraph shows how a function calls other functions.
|
||||
reverseCallGraph shows how a function is called by other functions.
|
||||
CallNode {
|
||||
e: selfEventCount
|
||||
s: subTreeEventCount
|
||||
f: functionId
|
||||
c: [CallNode] # children
|
||||
}
|
||||
|
||||
sourceCodeInfo {
|
||||
f: sourceFileId
|
||||
l: line
|
||||
e: eventCount
|
||||
s: subtreeEventCount
|
||||
}
|
||||
|
||||
addrInfo {
|
||||
a: addr
|
||||
e: eventCount
|
||||
s: subtreeEventCount
|
||||
}
|
||||
|
||||
11. sourceFiles: an array of sourceFile, indexed by sourceFileId.
|
||||
sourceFile {
|
||||
path
|
||||
code: # a map from line to code for that line.
|
||||
}
|
||||
"""
|
||||
|
||||
def __init__(self, binary_cache_path, ndk_path, build_addr_hit_map):
|
||||
self.binary_cache_path = binary_cache_path
|
||||
self.ndk_path = ndk_path
|
||||
self.build_addr_hit_map = build_addr_hit_map
|
||||
self.meta_info = None
|
||||
self.cmdline = None
|
||||
self.arch = None
|
||||
self.events = {}
|
||||
self.libs = LibSet()
|
||||
self.functions = FunctionSet()
|
||||
self.total_samples = 0
|
||||
self.source_files = SourceFileSet()
|
||||
self.gen_addr_hit_map_in_record_info = False
|
||||
|
||||
def load_record_file(self, record_file, show_art_frames):
|
||||
lib = ReportLib()
|
||||
lib.SetRecordFile(record_file)
|
||||
# If not showing ip for unknown symbols, the percent of the unknown symbol may be
|
||||
# accumulated to very big, and ranks first in the sample table.
|
||||
lib.ShowIpForUnknownSymbol()
|
||||
if show_art_frames:
|
||||
lib.ShowArtFrames()
|
||||
if self.binary_cache_path:
|
||||
lib.SetSymfs(self.binary_cache_path)
|
||||
self.meta_info = lib.MetaInfo()
|
||||
self.cmdline = lib.GetRecordCmd()
|
||||
self.arch = lib.GetArch()
|
||||
while True:
|
||||
raw_sample = lib.GetNextSample()
|
||||
if not raw_sample:
|
||||
lib.Close()
|
||||
break
|
||||
raw_event = lib.GetEventOfCurrentSample()
|
||||
symbol = lib.GetSymbolOfCurrentSample()
|
||||
callchain = lib.GetCallChainOfCurrentSample()
|
||||
event = self._get_event(raw_event.name)
|
||||
self.total_samples += 1
|
||||
event.sample_count += 1
|
||||
event.event_count += raw_sample.period
|
||||
process = event.get_process(raw_sample.pid)
|
||||
process.event_count += raw_sample.period
|
||||
thread = process.get_thread(raw_sample.tid, raw_sample.thread_comm)
|
||||
thread.event_count += raw_sample.period
|
||||
|
||||
lib_id = self.libs.get_lib_id(symbol.dso_name)
|
||||
func_id = self.functions.get_func_id(lib_id, symbol)
|
||||
callstack = [(lib_id, func_id, symbol.vaddr_in_file)]
|
||||
for i in range(callchain.nr):
|
||||
symbol = callchain.entries[i].symbol
|
||||
lib_id = self.libs.get_lib_id(symbol.dso_name)
|
||||
func_id = self.functions.get_func_id(lib_id, symbol)
|
||||
callstack.append((lib_id, func_id, symbol.vaddr_in_file))
|
||||
thread.add_callstack(raw_sample.period, callstack, self.build_addr_hit_map)
|
||||
|
||||
for event in self.events.values():
|
||||
for process in event.processes.values():
|
||||
for thread in process.threads.values():
|
||||
for lib in thread.libs.values():
|
||||
for func_id in lib.functions:
|
||||
function = lib.functions[func_id]
|
||||
function.update_subtree_event_count()
|
||||
|
||||
def limit_percents(self, min_func_percent, min_callchain_percent):
|
||||
hit_func_ids = set()
|
||||
for event in self.events.values():
|
||||
min_limit = event.event_count * min_func_percent * 0.01
|
||||
for process in event.processes.values():
|
||||
for thread in process.threads.values():
|
||||
for lib in thread.libs.values():
|
||||
to_del_func_ids = []
|
||||
for func_id in lib.functions:
|
||||
function = lib.functions[func_id]
|
||||
if function.call_graph.subtree_event_count < min_limit:
|
||||
to_del_func_ids.append(func_id)
|
||||
else:
|
||||
function.limit_callchain_percent(min_callchain_percent,
|
||||
hit_func_ids)
|
||||
for func_id in to_del_func_ids:
|
||||
del lib.functions[func_id]
|
||||
self.functions.trim_functions(hit_func_ids)
|
||||
|
||||
def _get_event(self, event_name):
|
||||
if event_name not in self.events:
|
||||
self.events[event_name] = EventScope(event_name)
|
||||
return self.events[event_name]
|
||||
|
||||
def add_source_code(self, source_dirs):
    """ Collect source code information:
        1. Find line ranges for each function in FunctionSet.
        2. Find line for each addr in FunctionScope.addr_hit_map.
        3. Collect needed source code in SourceFileSet.
    """
    addr2line = Addr2Nearestline(self.ndk_path, self.binary_cache_path)
    # Request line range for each function: queue the first and last
    # instruction addresses of every known function.
    for function in self.functions.id_to_func.values():
        if function.func_name == 'unknown':
            continue  # no symbol, nothing to map to source
        lib_name = self.libs.get_lib_name(function.lib_id)
        addr2line.add_addr(lib_name, function.start_addr, function.start_addr)
        addr2line.add_addr(lib_name, function.start_addr,
                           function.start_addr + function.addr_len - 1)
    # Request line for each addr in FunctionScope.addr_hit_map.
    for event in self.events.values():
        for process in event.processes.values():
            for thread in process.threads.values():
                for lib in thread.libs.values():
                    lib_name = self.libs.get_lib_name(lib.lib_id)
                    for function in lib.functions.values():
                        func_addr = self.functions.id_to_func[
                            function.call_graph.func_id].start_addr
                        for addr in function.addr_hit_map:
                            addr2line.add_addr(lib_name, func_addr, addr)
    # Resolve all queued addresses in one batch.
    addr2line.convert_addrs_to_lines()

    # Set line range for each function.
    for function in self.functions.id_to_func.values():
        if function.func_name == 'unknown':
            continue
        dso = addr2line.get_dso(self.libs.get_lib_name(function.lib_id))
        start_source = addr2line.get_addr_source(dso, function.start_addr)
        end_source = addr2line.get_addr_source(dso,
                                               function.start_addr + function.addr_len - 1)
        if not start_source or not end_source:
            continue  # either endpoint unresolvable -> skip this function
        start_file_path, start_line = start_source[-1]
        end_file_path, end_line = end_source[-1]
        # Only accept a forward range within a single file.
        if start_file_path != end_file_path or start_line > end_line:
            continue
        source_file = self.source_files.get_source_file(start_file_path)
        source_file.request_lines(start_line, end_line)
        function.source_info = (source_file.file_id, start_line, end_line)

    # Build FunctionScope.line_hit_map.
    for event in self.events.values():
        for process in event.processes.values():
            for thread in process.threads.values():
                for lib in thread.libs.values():
                    dso = addr2line.get_dso(self.libs.get_lib_name(lib.lib_id))
                    for function in lib.functions.values():
                        for addr in function.addr_hit_map:
                            source = addr2line.get_addr_source(dso, addr)
                            if not source:
                                continue
                            for file_path, line in source:
                                source_file = self.source_files.get_source_file(file_path)
                                # Show [line - 5, line + 5] of the line hit by a sample.
                                source_file.request_lines(line - 5, line + 5)
                                count_info = function.addr_hit_map[addr]
                                function.build_line_hit_map(source_file.file_id, line,
                                                            count_info[0], count_info[1])

    # Collect needed source code in SourceFileSet.
    self.source_files.load_source_code(source_dirs)
|
||||
|
||||
def add_disassembly(self):
    """Attach objdump disassembly to every named function in FunctionSet and
    set the flag so addr_hit_map is dumped when generating record info."""
    disassembler = Objdump(self.ndk_path, self.binary_cache_path)
    for func in self.functions.id_to_func.values():
        if func.func_name == 'unknown':
            continue  # no symbol information, nothing to disassemble
        lib_name = self.libs.get_lib_name(func.lib_id)
        func.disassembly = disassembler.disassemble_code(
            lib_name, func.start_addr, func.addr_len)

    self.gen_addr_hit_map_in_record_info = True
|
||||
|
||||
def gen_record_info(self):
    """Assemble the json-serializable dict embedded in the html report."""
    # Record time: prefer the timestamp stored in meta_info, fall back to now.
    timestamp = self.meta_info.get('timestamp')
    if timestamp:
        record_time = datetime.datetime.fromtimestamp(int(timestamp))
    else:
        record_time = datetime.datetime.now()

    # Describe the device when product props are available.
    machine_type = self.arch
    product_props = self.meta_info.get('product_props')
    if product_props:
        manufacturer, model, name = product_props.split(':')
        machine_type = '%s (%s) by %s, arch %s' % (model, name, manufacturer, self.arch)

    # Key order matters: json.dumps preserves insertion order.
    return {
        'recordTime': record_time.strftime('%Y-%m-%d (%A) %H:%M:%S'),
        'machineType': machine_type,
        'androidVersion': self.meta_info.get('android_version', ''),
        'recordCmdline': self.cmdline,
        'totalSamples': self.total_samples,
        'processNames': self._gen_process_names(),
        'threadNames': self._gen_thread_names(),
        'libList': self._gen_lib_list(),
        'functionMap': self._gen_function_map(),
        'sampleInfo': self._gen_sample_info(),
        'sourceFiles': self._gen_source_files(),
    }
|
||||
|
||||
def _gen_process_names(self):
    """Map pid -> process name across all events."""
    return {process.pid: process.name
            for event in self.events.values()
            for process in event.processes.values()}
|
||||
|
||||
def _gen_thread_names(self):
    """Map tid -> thread name across all events and processes."""
    return {thread.tid: thread.name
            for event in self.events.values()
            for process in event.processes.values()
            for thread in process.threads.values()}
|
||||
|
||||
def _gen_lib_list(self):
    """Html-escaped library names, indexed by lib_id."""
    return [modify_text_for_html(lib_name) for lib_name in self.libs.lib_id_to_name]
|
||||
|
||||
def _gen_function_map(self):
    """Map func_id -> compact function record for the html report.

    Each record has 'l' (lib_id) and 'f' (escaped name); 's' (source_info)
    and 'd' (escaped disassembly rows) are included only when present.
    """
    func_map = {}
    for func_id in sorted(self.functions.id_to_func):
        func = self.functions.id_to_func[func_id]
        data = {'l': func.lib_id, 'f': modify_text_for_html(func.func_name)}
        if func.source_info:
            data['s'] = func.source_info
        if func.disassembly:
            data['d'] = [[modify_text_for_html(code), addr]
                         for code, addr in func.disassembly]
        func_map[func_id] = data
    return func_map
|
||||
|
||||
def _gen_sample_info(self):
    """Per-event sample info; addr hit maps are included only when requested."""
    want_addr_hit_map = self.gen_addr_hit_map_in_record_info
    return [event.get_sample_info(want_addr_hit_map)
            for event in self.events.values()]
|
||||
|
||||
def _gen_source_files(self):
    """Source files as [{'path': ..., 'code': {line: html}}], ordered by file_id.

    Files that were never located on disk get an empty path and code map.
    """
    ordered = sorted(self.source_files.path_to_source_files.values(),
                     key=lambda f: f.file_id)
    result = []
    for src in ordered:
        if src.real_path:
            code_map = {line: modify_text_for_html(text)
                        for line, text in src.line_to_code.items()}
            result.append({'path': src.real_path, 'code': code_map})
        else:
            result.append({'path': '', 'code': {}})
    return result
|
||||
|
||||
|
||||
class ReportGenerator(object):
    """Write the report as one self-contained html page through HtmlWriter.

    __init__ emits <head> (CDN-hosted jquery/jquery-ui/DataTables/Google
    Charts plus inline CSS) and opens <body>; callers then add the content
    div, the record data, the report script and the flamegraph, and call
    finish() to close the document.
    """

    def __init__(self, html_path):
        # HtmlWriter is this project's streaming html emitter; open_tag()
        # returns self so calls can be chained with close_tag().
        self.hw = HtmlWriter(html_path)
        self.hw.open_tag('html')
        self.hw.open_tag('head')
        self.hw.open_tag('link', rel='stylesheet', type='text/css',
                         href='https://code.jquery.com/ui/1.12.0/themes/smoothness/jquery-ui.css'
                         ).close_tag()

        self.hw.open_tag('link', rel='stylesheet', type='text/css',
                         href='https://cdn.datatables.net/1.10.16/css/jquery.dataTables.min.css'
                         ).close_tag()
        self.hw.open_tag('script', src='https://www.gstatic.com/charts/loader.js').close_tag()
        self.hw.open_tag('script').add(
            "google.charts.load('current', {'packages': ['corechart', 'table']});").close_tag()
        self.hw.open_tag('script', src='https://code.jquery.com/jquery-3.2.1.js').close_tag()
        self.hw.open_tag('script', src='https://code.jquery.com/ui/1.12.1/jquery-ui.js'
                         ).close_tag()
        self.hw.open_tag('script',
                         src='https://cdn.datatables.net/1.10.16/js/jquery.dataTables.min.js'
                         ).close_tag()
        self.hw.open_tag('script',
                         src='https://cdn.datatables.net/1.10.16/js/dataTables.jqueryui.min.js'
                         ).close_tag()
        self.hw.open_tag('style', type='text/css').add("""
            .colForLine { width: 50px; }
            .colForCount { width: 100px; }
            .tableCell { font-size: 17px; }
            .boldTableCell { font-weight: bold; font-size: 17px; }
            """).close_tag()
        self.hw.close_tag('head')
        self.hw.open_tag('body')
        self.record_info = {}

    def write_content_div(self):
        """Emit the empty div the report script renders into."""
        self.hw.open_tag('div', id='report_content').close_tag()

    def write_record_data(self, record_data):
        """Embed the record data as json in a <script type="application/json"> tag."""
        self.hw.open_tag('script', id='record_data', type='application/json')
        self.hw.add(json.dumps(record_data))
        self.hw.close_tag()

    def write_flamegraph(self, flamegraph):
        """Append pre-rendered flamegraph html verbatim."""
        self.hw.add(flamegraph)

    def write_script(self):
        """Inline the contents of report_html.js into a <script> tag."""
        self.hw.open_tag('script').add_file('report_html.js').close_tag()

    def finish(self):
        """Close <body> and <html> and flush/close the output file."""
        self.hw.close_tag('body')
        self.hw.close_tag('html')
        self.hw.close()
|
||||
|
||||
|
||||
def gen_flamegraph(record_file, show_art_frames):
    """Run the inferno script on record_file and return its embedded
    flamegraph html.

    Args:
        record_file: path of the perf.data file to render.
        show_art_frames: when True, pass --show_art_frames to inferno.

    Returns:
        The flamegraph html read from inferno's output file.

    Raises:
        subprocess.CalledProcessError: if inferno exits non-zero.
    """
    fd, flamegraph_path = tempfile.mkstemp()
    os.close(fd)  # inferno writes the file itself; we only need the path
    try:
        inferno_script_path = os.path.join(get_script_dir(), 'inferno', 'inferno.py')
        args = [sys.executable, inferno_script_path, '-sc', '-o', flamegraph_path,
                '--record_file', record_file, '--embedded_flamegraph', '--no_browser']
        if show_art_frames:
            args.append('--show_art_frames')
        subprocess.check_call(args)
        with open(flamegraph_path, 'r') as fh:
            return fh.read()
    finally:
        # Fix: previously the temp file leaked whenever check_call or the
        # read raised; always clean it up.
        remove(flamegraph_path)
|
||||
|
||||
|
||||
def main():
    """Entry point: parse args, build RecordData, and write the html report."""
    parser = argparse.ArgumentParser(description='report profiling data')
    parser.add_argument('-i', '--record_file', nargs='+', default=['perf.data'], help="""
                        Set profiling data file to report. Default is perf.data.""")
    parser.add_argument('-o', '--report_path', default='report.html', help="""
                        Set output html file. Default is report.html.""")
    parser.add_argument('--min_func_percent', default=0.01, type=float, help="""
                        Set min percentage of functions shown in the report.
                        For example, when set to 0.01, only functions taking >= 0.01%% of total
                        event count are collected in the report. Default is 0.01.""")
    parser.add_argument('--min_callchain_percent', default=0.01, type=float, help="""
                        Set min percentage of callchains shown in the report.
                        It is used to limit nodes shown in the function flamegraph. For example,
                        when set to 0.01, only callchains taking >= 0.01%% of the event count of
                        the starting function are collected in the report. Default is 0.01.""")
    parser.add_argument('--add_source_code', action='store_true', help='Add source code.')
    parser.add_argument('--source_dirs', nargs='+', help='Source code directories.')
    parser.add_argument('--add_disassembly', action='store_true', help='Add disassembled code.')
    parser.add_argument('--ndk_path', nargs=1, help='Find tools in the ndk path.')
    parser.add_argument('--no_browser', action='store_true', help="Don't open report in browser.")
    parser.add_argument('--show_art_frames', action='store_true',
                        help='Show frames of internal methods in the ART Java interpreter.')
    args = parser.parse_args()

    # 1. Process args.
    binary_cache_path = 'binary_cache'
    if not os.path.isdir(binary_cache_path):
        # Source/disassembly annotation needs the collected binaries.
        if args.add_source_code or args.add_disassembly:
            log_exit("""binary_cache/ doesn't exist. Can't add source code or disassembled code
                        without collected binaries. Please run binary_cache_builder.py to
                        collect binaries for current profiling data, or run app_profiler.py
                        without -nb option.""")
        binary_cache_path = None

    if args.add_source_code and not args.source_dirs:
        log_exit('--source_dirs is needed to add source code.')
    # addr hit maps are only needed when annotating source or disassembly.
    build_addr_hit_map = args.add_source_code or args.add_disassembly
    ndk_path = None if not args.ndk_path else args.ndk_path[0]

    # 2. Produce record data.
    record_data = RecordData(binary_cache_path, ndk_path, build_addr_hit_map)
    for record_file in args.record_file:
        record_data.load_record_file(record_file, args.show_art_frames)
    record_data.limit_percents(args.min_func_percent, args.min_callchain_percent)
    if args.add_source_code:
        record_data.add_source_code(args.source_dirs)
    if args.add_disassembly:
        record_data.add_disassembly()

    # 3. Generate report html.
    report_generator = ReportGenerator(args.report_path)
    report_generator.write_content_div()
    report_generator.write_record_data(record_data.gen_record_info())
    report_generator.write_script()
    # TODO: support multiple perf.data in flamegraph.
    if len(args.record_file) > 1:
        log_warning('flamegraph will only be shown for %s' % args.record_file[0])
    flamegraph = gen_flamegraph(args.record_file[0], args.show_art_frames)
    report_generator.write_flamegraph(flamegraph)
    report_generator.finish()

    if not args.no_browser:
        open_report_in_browser(args.report_path)
    log_info("Report generated at '%s'." % args.report_path)
|
||||
|
||||
|
||||
# Run as a script (not on import): generate the report from command-line args.
if __name__ == '__main__':
    main()
|
|
@ -1,370 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2016 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
"""simpleperf_report_lib.py: a python wrapper of libsimpleperf_report.so.
|
||||
Used to access samples in perf.data.
|
||||
|
||||
"""
|
||||
|
||||
import ctypes as ct
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import unittest
|
||||
from utils import *
|
||||
|
||||
|
||||
def _get_native_lib():
    """Return the path of the bundled libsimpleperf_report.so for this host."""
    return get_host_binary_path('libsimpleperf_report.so')
|
||||
|
||||
|
||||
def _is_null(p):
|
||||
if p:
|
||||
return False
|
||||
return ct.cast(p, ct.c_void_p).value is None
|
||||
|
||||
|
||||
def _char_pt(s):
    """Convert a python string to the 8-bit string the native api expects."""
    return str_to_bytes(s)
|
||||
|
||||
|
||||
def _char_pt_to_str(char_pt):
    """Convert a char* value from the native api back to a python string."""
    return bytes_to_str(char_pt)
|
||||
|
||||
|
||||
class SampleStruct(ct.Structure):
    """ Instance of a sample in perf.data.
        ip: the program counter of the thread generating the sample.
        pid: process id (or thread group id) of the thread generating the sample.
        tid: thread id.
        thread_comm: thread name.
        time: time at which the sample was generated. The value is in nanoseconds.
              The clock is decided by the --clockid option in `simpleperf record`.
        in_kernel: whether the instruction is in kernel space or user space.
        cpu: the cpu generating the sample.
        period: count of events have happened since last sample. For example, if we use
                -e cpu-cycles, it means how many cpu-cycles have happened.
                If we use -e cpu-clock, it means how many nanoseconds have passed.
    """
    # Field order and types must match the struct returned by
    # libsimpleperf_report.so (used as GetNextSample's restype).
    _fields_ = [('ip', ct.c_uint64),
                ('pid', ct.c_uint32),
                ('tid', ct.c_uint32),
                ('thread_comm', ct.c_char_p),
                ('time', ct.c_uint64),
                ('in_kernel', ct.c_uint32),
                ('cpu', ct.c_uint32),
                ('period', ct.c_uint64)]
|
||||
|
||||
|
||||
class EventStruct(ct.Structure):
    """ Name of the event. """
    # Layout mirrors the native struct returned by GetEventOfCurrentSample.
    _fields_ = [('name', ct.c_char_p)]
|
||||
|
||||
|
||||
class MappingStruct(ct.Structure):
    """ A mapping area in the monitored threads, like the content in /proc/<pid>/maps.
        start: start addr in memory.
        end: end addr in memory.
        pgoff: offset in the mapped shared library.
    """
    # Layout mirrors the native struct referenced from SymbolStruct.mapping.
    _fields_ = [('start', ct.c_uint64),
                ('end', ct.c_uint64),
                ('pgoff', ct.c_uint64)]
|
||||
|
||||
|
||||
class SymbolStruct(ct.Structure):
    """ Symbol info of the instruction hit by a sample or a callchain entry of a sample.
        dso_name: path of the shared library containing the instruction.
        vaddr_in_file: virtual address of the instruction in the shared library.
        symbol_name: name of the function containing the instruction.
        symbol_addr: start addr of the function containing the instruction.
        symbol_len: length of the function in the shared library.
        mapping: the mapping area hit by the instruction.
    """
    # Layout mirrors the native struct returned by GetSymbolOfCurrentSample.
    _fields_ = [('dso_name', ct.c_char_p),
                ('vaddr_in_file', ct.c_uint64),
                ('symbol_name', ct.c_char_p),
                ('symbol_addr', ct.c_uint64),
                ('symbol_len', ct.c_uint64),
                ('mapping', ct.POINTER(MappingStruct))]
|
||||
|
||||
|
||||
class CallChainEntryStructure(ct.Structure):
    """ A callchain entry of a sample.
        ip: the address of the instruction of the callchain entry.
        symbol: symbol info of the callchain entry.
    """
    # Embedded (not pointed-to) SymbolStruct, matching the native layout.
    _fields_ = [('ip', ct.c_uint64),
                ('symbol', SymbolStruct)]
|
||||
|
||||
|
||||
class CallChainStructure(ct.Structure):
    """ Callchain info of a sample.
        nr: number of entries in the callchain.
        entries: a pointer to an array of CallChainEntryStructure.

        For example, if a sample is generated when a thread is running function C
        with callchain function A -> function B -> function C.
        Then nr = 2, and entries = [function B, function A].
    """
    # Layout mirrors the native struct returned by GetCallChainOfCurrentSample.
    _fields_ = [('nr', ct.c_uint32),
                ('entries', ct.POINTER(CallChainEntryStructure))]
|
||||
|
||||
|
||||
class FeatureSectionStructure(ct.Structure):
    """ A feature section in perf.data to store information like record cmd, device arch, etc.
        data: a pointer to a buffer storing the section data.
        data_size: data size in bytes.
    """
    # Layout mirrors the native struct returned by GetFeatureSection.
    _fields_ = [('data', ct.POINTER(ct.c_char)),
                ('data_size', ct.c_uint32)]
|
||||
|
||||
|
||||
# convert char_p to str for python3.
|
||||
class SampleStructUsingStr(object):
    """ Python3 view of SampleStruct with char* fields decoded to str. """

    def __init__(self, sample):
        # Plain copies of the numeric fields; only thread_comm needs decoding.
        self.ip = sample.ip
        self.pid = sample.pid
        self.tid = sample.tid
        self.thread_comm = _char_pt_to_str(sample.thread_comm)
        self.time = sample.time
        self.in_kernel = sample.in_kernel
        self.cpu = sample.cpu
        self.period = sample.period
|
||||
|
||||
|
||||
class EventStructUsingStr(object):
    """ Python3 view of EventStruct with its char* name decoded to str. """

    def __init__(self, event):
        self.name = _char_pt_to_str(event.name)
|
||||
|
||||
|
||||
class SymbolStructUsingStr(object):
    """ Python3 view of SymbolStruct with char* fields decoded to str.

    Mirrors every field of SymbolStruct so callers behave the same whether
    or not ReportLib.convert_to_str wrapped the sample.
    """

    def __init__(self, symbol):
        self.dso_name = _char_pt_to_str(symbol.dso_name)
        self.vaddr_in_file = symbol.vaddr_in_file
        self.symbol_name = _char_pt_to_str(symbol.symbol_name)
        self.symbol_addr = symbol.symbol_addr
        # Fix: symbol_len was previously dropped here, making the str view
        # inconsistent with SymbolStruct's declared fields.
        self.symbol_len = symbol.symbol_len
        self.mapping = symbol.mapping
|
||||
|
||||
|
||||
class CallChainEntryStructureUsingStr(object):
    """ Python3 view of CallChainEntryStructure; symbol is wrapped for str fields. """

    def __init__(self, entry):
        self.ip = entry.ip
        self.symbol = SymbolStructUsingStr(entry.symbol)
|
||||
|
||||
|
||||
class CallChainStructureUsingStr(object):
    """Python3 view of CallChainStructure; each entry is wrapped for str fields."""

    def __init__(self, callchain):
        self.nr = callchain.nr
        self.entries = [CallChainEntryStructureUsingStr(callchain.entries[i])
                        for i in range(self.nr)]
|
||||
|
||||
|
||||
class ReportLibStructure(ct.Structure):
    """ Opaque handle for the native report instance; no fields are exposed. """
    _fields_ = []
|
||||
|
||||
|
||||
class ReportLib(object):
    """Read samples from perf.data through libsimpleperf_report.so.

    Typical use: construct, call SetRecordFile() (plus optional SetSymfs()
    or SetKallsymsFile()), then loop on GetNextSample() and query the
    current sample's event/symbol/callchain. On python3 the returned
    structs are converted to the *UsingStr wrappers so char* fields become
    str. Call Close() when done.
    """

    def __init__(self, native_lib_path=None):
        """Load the native library (the bundled one when no path is given)
        and create a native report instance."""
        if native_lib_path is None:
            native_lib_path = _get_native_lib()

        self._load_dependent_lib()
        self._lib = ct.CDLL(native_lib_path)
        # Bind C entry points; declare restype where the default int
        # return type is wrong.
        self._CreateReportLibFunc = self._lib.CreateReportLib
        self._CreateReportLibFunc.restype = ct.POINTER(ReportLibStructure)
        self._DestroyReportLibFunc = self._lib.DestroyReportLib
        self._SetLogSeverityFunc = self._lib.SetLogSeverity
        self._SetSymfsFunc = self._lib.SetSymfs
        self._SetRecordFileFunc = self._lib.SetRecordFile
        self._SetKallsymsFileFunc = self._lib.SetKallsymsFile
        self._ShowIpForUnknownSymbolFunc = self._lib.ShowIpForUnknownSymbol
        self._ShowArtFramesFunc = self._lib.ShowArtFrames
        self._GetNextSampleFunc = self._lib.GetNextSample
        self._GetNextSampleFunc.restype = ct.POINTER(SampleStruct)
        self._GetEventOfCurrentSampleFunc = self._lib.GetEventOfCurrentSample
        self._GetEventOfCurrentSampleFunc.restype = ct.POINTER(EventStruct)
        self._GetSymbolOfCurrentSampleFunc = self._lib.GetSymbolOfCurrentSample
        self._GetSymbolOfCurrentSampleFunc.restype = ct.POINTER(SymbolStruct)
        self._GetCallChainOfCurrentSampleFunc = self._lib.GetCallChainOfCurrentSample
        self._GetCallChainOfCurrentSampleFunc.restype = ct.POINTER(
            CallChainStructure)
        self._GetBuildIdForPathFunc = self._lib.GetBuildIdForPath
        self._GetBuildIdForPathFunc.restype = ct.c_char_p
        self._GetFeatureSection = self._lib.GetFeatureSection
        self._GetFeatureSection.restype = ct.POINTER(FeatureSectionStructure)
        self._instance = self._CreateReportLibFunc()
        assert not _is_null(self._instance)

        # Convert char* fields to python str on python3.
        self.convert_to_str = (sys.version_info >= (3, 0))
        self.meta_info = None       # lazily parsed by MetaInfo()
        self.current_sample = None  # last sample returned by GetNextSample()
        self.record_cmd = None      # lazily parsed by GetRecordCmd()

    def _load_dependent_lib(self):
        # As the windows dll is built with mingw we need to load 'libwinpthread-1.dll'.
        if is_windows():
            self._libwinpthread = ct.CDLL(get_host_binary_path('libwinpthread-1.dll'))

    def Close(self):
        """Destroy the native instance; safe to call more than once."""
        if self._instance is None:
            return
        self._DestroyReportLibFunc(self._instance)
        self._instance = None

    def SetLogSeverity(self, log_level='info'):
        """ Set log severity of native lib, can be verbose,debug,info,error,fatal."""
        cond = self._SetLogSeverityFunc(self.getInstance(), _char_pt(log_level))
        self._check(cond, 'Failed to set log level')

    def SetSymfs(self, symfs_dir):
        """ Set directory used to find symbols."""
        cond = self._SetSymfsFunc(self.getInstance(), _char_pt(symfs_dir))
        self._check(cond, 'Failed to set symbols directory')

    def SetRecordFile(self, record_file):
        """ Set the path of record file, like perf.data."""
        cond = self._SetRecordFileFunc(self.getInstance(), _char_pt(record_file))
        self._check(cond, 'Failed to set record file')

    def ShowIpForUnknownSymbol(self):
        """Report raw instruction pointers for samples with no symbol info."""
        self._ShowIpForUnknownSymbolFunc(self.getInstance())

    def ShowArtFrames(self, show=True):
        """ Show frames of internal methods of the Java interpreter. """
        self._ShowArtFramesFunc(self.getInstance(), show)

    def SetKallsymsFile(self, kallsym_file):
        """ Set the file path to a copy of the /proc/kallsyms file (for off device decoding) """
        cond = self._SetKallsymsFileFunc(self.getInstance(), _char_pt(kallsym_file))
        self._check(cond, 'Failed to set kallsyms file')

    def GetNextSample(self):
        """Advance to the next sample; return it, or None at end of data."""
        psample = self._GetNextSampleFunc(self.getInstance())
        if _is_null(psample):
            self.current_sample = None
        else:
            sample = psample[0]
            self.current_sample = SampleStructUsingStr(sample) if self.convert_to_str else sample
        return self.current_sample

    def GetCurrentSample(self):
        """Return the sample most recently produced by GetNextSample()."""
        return self.current_sample

    def GetEventOfCurrentSample(self):
        """Return the event of the current sample (requires a current sample)."""
        event = self._GetEventOfCurrentSampleFunc(self.getInstance())
        assert not _is_null(event)
        if self.convert_to_str:
            return EventStructUsingStr(event[0])
        return event[0]

    def GetSymbolOfCurrentSample(self):
        """Return the symbol of the current sample (requires a current sample)."""
        symbol = self._GetSymbolOfCurrentSampleFunc(self.getInstance())
        assert not _is_null(symbol)
        if self.convert_to_str:
            return SymbolStructUsingStr(symbol[0])
        return symbol[0]

    def GetCallChainOfCurrentSample(self):
        """Return the callchain of the current sample (requires a current sample)."""
        callchain = self._GetCallChainOfCurrentSampleFunc(self.getInstance())
        assert not _is_null(callchain)
        if self.convert_to_str:
            return CallChainStructureUsingStr(callchain[0])
        return callchain[0]

    def GetBuildIdForPath(self, path):
        """Return the build id recorded for a binary path."""
        build_id = self._GetBuildIdForPathFunc(self.getInstance(), _char_pt(path))
        assert not _is_null(build_id)
        return _char_pt_to_str(build_id)

    def GetRecordCmd(self):
        """Return the `simpleperf record` command line stored in perf.data.

        Parsed from the 'cmdline' feature section: a u32 arg count followed
        by (u32 length, chars) for each argument. Cached after first call.
        """
        if self.record_cmd is not None:
            return self.record_cmd
        self.record_cmd = ''
        feature_data = self._GetFeatureSection(self.getInstance(), _char_pt('cmdline'))
        if not _is_null(feature_data):
            void_p = ct.cast(feature_data[0].data, ct.c_void_p)
            arg_count = ct.cast(void_p, ct.POINTER(ct.c_uint32)).contents.value
            void_p.value += 4  # step past the u32 arg count
            args = []
            for _ in range(arg_count):
                str_len = ct.cast(void_p, ct.POINTER(ct.c_uint32)).contents.value
                void_p.value += 4  # step past the u32 string length
                char_p = ct.cast(void_p, ct.POINTER(ct.c_char))
                current_str = ''
                for j in range(str_len):
                    c = bytes_to_str(char_p[j])
                    if c != '\0':
                        current_str += c
                # Quote args containing spaces so the joined cmdline is readable.
                if ' ' in current_str:
                    current_str = '"' + current_str + '"'
                args.append(current_str)
                void_p.value += str_len
            self.record_cmd = ' '.join(args)
        return self.record_cmd

    def _GetFeatureString(self, feature_name):
        """Return a feature section stored as (u32 length, chars), or ''."""
        feature_data = self._GetFeatureSection(self.getInstance(), _char_pt(feature_name))
        result = ''
        if not _is_null(feature_data):
            void_p = ct.cast(feature_data[0].data, ct.c_void_p)
            str_len = ct.cast(void_p, ct.POINTER(ct.c_uint32)).contents.value
            void_p.value += 4  # step past the u32 string length
            char_p = ct.cast(void_p, ct.POINTER(ct.c_char))
            for i in range(str_len):
                c = bytes_to_str(char_p[i])
                if c == '\0':
                    break  # stop at embedded NUL terminator
                result += c
        return result

    def GetArch(self):
        """Return the device architecture string recorded in perf.data."""
        return self._GetFeatureString('arch')

    def MetaInfo(self):
        """ Return a string to string map stored in meta_info section in perf.data.
            It is used to pass some short meta information.
        """
        if self.meta_info is None:
            self.meta_info = {}
            feature_data = self._GetFeatureSection(self.getInstance(), _char_pt('meta_info'))
            if not _is_null(feature_data):
                # The section is a flat sequence of NUL-terminated strings:
                # key0\0value0\0key1\0value1\0...
                str_list = []
                data = feature_data[0].data
                data_size = feature_data[0].data_size
                current_str = ''
                for i in range(data_size):
                    c = bytes_to_str(data[i])
                    if c != '\0':
                        current_str += c
                    else:
                        str_list.append(current_str)
                        current_str = ''
                for i in range(0, len(str_list), 2):
                    self.meta_info[str_list[i]] = str_list[i + 1]
        return self.meta_info

    def getInstance(self):
        """Return the native instance pointer; raise after Close()."""
        if self._instance is None:
            raise Exception('Instance is Closed')
        return self._instance

    def _check(self, cond, failmsg):
        """Raise Exception(failmsg) when a native call reports failure."""
        if not cond:
            raise Exception(failmsg)
|
|
@ -1,677 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright (C) 2016 The Android Open Source Project
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
"""utils.py: export utility functions.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
import logging
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
def get_script_dir():
    """Absolute directory containing this script, with symlinks resolved."""
    script_path = os.path.realpath(__file__)
    return os.path.dirname(script_path)
|
||||
|
||||
def is_windows():
    """True on native windows or cygwin hosts."""
    return sys.platform in ('win32', 'cygwin')
|
||||
|
||||
def is_darwin():
    """Return True when running on macOS."""
    return sys.platform == 'darwin'
|
||||
|
||||
def get_platform():
    """Name of the host OS: 'windows', 'darwin' or 'linux'."""
    if is_windows():
        return 'windows'
    return 'darwin' if is_darwin() else 'linux'
|
||||
|
||||
def is_python3():
    """True when the interpreter is python 3 or newer."""
    return sys.version_info.major >= 3
|
||||
|
||||
|
||||
def log_debug(msg):
    """Log msg at debug level via the root logger."""
    logging.debug(msg)
|
||||
|
||||
|
||||
def log_info(msg):
    """Log msg at info level via the root logger."""
    logging.info(msg)
|
||||
|
||||
|
||||
def log_warning(msg):
    """Log msg at warning level via the root logger."""
    logging.warning(msg)
|
||||
|
||||
|
||||
def log_fatal(msg):
    """Abort by raising an Exception carrying msg."""
    raise Exception(msg)
|
||||
|
||||
def log_exit(msg):
    """Exit the process with msg printed to stderr (raises SystemExit)."""
    sys.exit(msg)
|
||||
|
||||
def disable_debug_log():
    """Raise the root logger threshold so only warnings and errors show."""
    # logging.WARN is a deprecated alias; WARNING is the documented constant.
    logging.getLogger().setLevel(logging.WARNING)
|
||||
|
||||
def str_to_bytes(str):
    """Encode a text string as utf-8 bytes on python3.

    On python2 the value already is an 8-bit string and passes through;
    the C api expects 8-bit strings, hence the utf-8 encode on python3.
    """
    if is_python3():
        return str.encode('utf-8')
    return str
|
||||
|
||||
def bytes_to_str(bytes):
    """Decode utf-8 bytes from the C api into str on python3; no-op on python2."""
    return bytes.decode('utf-8') if is_python3() else bytes
|
||||
|
||||
def get_target_binary_path(arch, binary_name):
    """Return the path of a bundled device binary for the given arch.

    Looks under <script_dir>/bin/android/<arch>/. 'aarch64' is normalized
    to 'arm64', the directory name the binaries ship under. Calls
    log_fatal (raises) when the directory or the binary is missing.
    """
    if arch == 'aarch64':
        arch = 'arm64'
    arch_dir = os.path.join(get_script_dir(), "bin", "android", arch)
    if not os.path.isdir(arch_dir):
        log_fatal("can't find arch directory: %s" % arch_dir)
    binary_path = os.path.join(arch_dir, binary_name)
    if not os.path.isfile(binary_path):
        log_fatal("can't find binary: %s" % binary_path)
    return binary_path
|
||||
|
||||
|
||||
def get_host_binary_path(binary_name):
    """Locate a bundled host tool under <script_dir>/bin/<platform>/<bitness>/.

    The file extension is adapted to the host OS ('.so' becomes '.dll' or
    '.dylib'; extensionless names gain '.exe' on windows). Calls log_fatal
    (raises) when the file is missing.
    """
    bin_dir = os.path.join(get_script_dir(), 'bin')
    if is_windows():
        if binary_name.endswith('.so'):
            binary_name = binary_name[0:-3] + '.dll'
        elif '.' not in binary_name:
            binary_name += '.exe'
        bin_dir = os.path.join(bin_dir, 'windows')
    elif sys.platform == 'darwin':  # OSX
        if binary_name.endswith('.so'):
            binary_name = binary_name[0:-3] + '.dylib'
        bin_dir = os.path.join(bin_dir, 'darwin')
    else:
        bin_dir = os.path.join(bin_dir, 'linux')
    bitness = 'x86_64' if sys.maxsize > 2 ** 32 else 'x86'
    binary_path = os.path.join(bin_dir, bitness, binary_name)
    if not os.path.isfile(binary_path):
        log_fatal("can't find binary: %s" % binary_path)
    return binary_path
|
||||
|
||||
|
||||
def is_executable_available(executable, option='--help'):
    """ Run an executable to see if it exists.

    Returns True iff *executable* can be spawned with *option* and exits
    with status 0. Any spawn failure (missing file, permission error, ...)
    is reported as False rather than raised.
    """
    try:
        subproc = subprocess.Popen([executable, option], stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        subproc.communicate()
        return subproc.returncode == 0
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only genuine runtime errors should mean "not available".
        return False
|
||||
|
||||
# Default NDK install location relative to the user's home directory
# ($HOME, or %HOMEPATH% on Windows), keyed by the host platform name.
DEFAULT_NDK_PATH = {
    'darwin': 'Library/Android/sdk/ndk-bundle',
    'linux': 'Android/Sdk/ndk-bundle',
    'windows': 'AppData/Local/Android/sdk/ndk-bundle',
}
|
||||
|
||||
# Tools that find_tool_path() knows how to locate.
#   is_binutils: True if the tool is an arch-prefixed NDK binutils program.
#   test_option: option used to probe that the tool runs (default '--help').
#   path_in_ndk: tool location relative to the NDK root (non-binutils only).
#   accept_tool_without_arch: also accept the bare (un-prefixed) name on $PATH.
EXPECTED_TOOLS = {
    'adb': {
        'is_binutils': False,
        'test_option': 'version',
        'path_in_ndk': '../platform-tools/adb',
    },
    'readelf': {
        'is_binutils': True,
        'accept_tool_without_arch': True,
    },
    'addr2line': {
        'is_binutils': True,
        'accept_tool_without_arch': True
    },
    'objdump': {
        'is_binutils': True,
    },
}
|
||||
|
||||
def _get_binutils_path_in_ndk(toolname, arch, platform):
|
||||
if not arch:
|
||||
arch = 'arm64'
|
||||
if arch == 'arm64':
|
||||
name = 'aarch64-linux-android-' + toolname
|
||||
path = 'toolchains/aarch64-linux-android-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
|
||||
elif arch == 'arm':
|
||||
name = 'arm-linux-androideabi-' + toolname
|
||||
path = 'toolchains/arm-linux-androideabi-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
|
||||
elif arch == 'x86_64':
|
||||
name = 'x86_64-linux-android-' + toolname
|
||||
path = 'toolchains/x86_64-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
|
||||
elif arch == 'x86':
|
||||
name = 'i686-linux-android-' + toolname
|
||||
path = 'toolchains/x86-4.9/prebuilt/%s-x86_64/bin/%s' % (platform, name)
|
||||
else:
|
||||
log_fatal('unexpected arch %s' % arch)
|
||||
return (name, path)
|
||||
|
||||
def find_tool_path(toolname, ndk_path=None, arch=None):
    """Locate one of EXPECTED_TOOLS, trying several places in order.

    Search order: the given ndk_path, the NDK directory containing these
    scripts, the default NDK install location under the home directory,
    the arch-prefixed tool name on $PATH, and finally (for binutils marked
    accept_tool_without_arch) the bare tool name on $PATH. Returns the
    first candidate that actually runs, or None.
    """
    if toolname not in EXPECTED_TOOLS:
        return None
    tool_info = EXPECTED_TOOLS[toolname]
    is_binutils = tool_info['is_binutils']
    test_option = tool_info.get('test_option', '--help')
    platform = get_platform()
    if is_binutils:
        toolname_with_arch, path_in_ndk = _get_binutils_path_in_ndk(toolname, arch, platform)
    else:
        toolname_with_arch = toolname
        path_in_ndk = tool_info['path_in_ndk']
    # Normalize to the host's path separator before joining.
    path_in_ndk = path_in_ndk.replace('/', os.sep)

    # 1. Find tool in the given ndk path.
    if ndk_path:
        path = os.path.join(ndk_path, path_in_ndk)
        if is_executable_available(path, test_option):
            return path

    # 2. Find tool in the ndk directory containing simpleperf scripts.
    path = os.path.join('..', path_in_ndk)
    if is_executable_available(path, test_option):
        return path

    # 3. Find tool in the default ndk installation path.
    home = os.environ.get('HOMEPATH') if is_windows() else os.environ.get('HOME')
    if home:
        default_ndk_path = os.path.join(home, DEFAULT_NDK_PATH[platform].replace('/', os.sep))
        path = os.path.join(default_ndk_path, path_in_ndk)
        if is_executable_available(path, test_option):
            return path

    # 4. Find tool in $PATH.
    if is_executable_available(toolname_with_arch, test_option):
        return toolname_with_arch

    # 5. Find tool without arch in $PATH.
    if is_binutils and tool_info.get('accept_tool_without_arch'):
        if is_executable_available(toolname, test_option):
            return toolname
    return None
|
||||
|
||||
|
||||
class AdbHelper(object):
    """Convenience wrapper around the adb command-line tool.

    enable_switch_to_root controls whether switch_to_root() may run
    `adb root`; when False, switch_to_root() unroots the device instead.
    """
    def __init__(self, enable_switch_to_root=True):
        adb_path = find_tool_path('adb')
        if not adb_path:
            log_exit("Can't find adb in PATH environment.")
        self.adb_path = adb_path
        self.enable_switch_to_root = enable_switch_to_root

    def run(self, adb_args):
        """Run `adb <adb_args>`; return True iff it exited with status 0."""
        return self.run_and_return_output(adb_args)[0]

    def run_and_return_output(self, adb_args, stdout_file=None, log_output=True):
        """Run `adb <adb_args>`; return (success, stdout_text).

        When stdout_file is given, stdout is redirected to that file and the
        returned text is empty. Output of push/pull is left undecoded.
        """
        adb_args = [self.adb_path] + adb_args
        log_debug('run adb cmd: %s' % adb_args)
        if stdout_file:
            with open(stdout_file, 'wb') as stdout_fh:
                returncode = subprocess.call(adb_args, stdout=stdout_fh)
            stdoutdata = ''
        else:
            subproc = subprocess.Popen(adb_args, stdout=subprocess.PIPE)
            (stdoutdata, _) = subproc.communicate()
            returncode = subproc.returncode
        result = (returncode == 0)
        # push/pull output may contain arbitrary bytes; don't decode or log it.
        if stdoutdata and adb_args[1] != 'push' and adb_args[1] != 'pull':
            stdoutdata = bytes_to_str(stdoutdata)
            if log_output:
                log_debug(stdoutdata)
        log_debug('run adb cmd: %s [result %s]' % (adb_args, result))
        return (result, stdoutdata)

    def check_run(self, adb_args):
        """Run `adb <adb_args>`; exit the process on failure."""
        self.check_run_and_return_output(adb_args)

    def check_run_and_return_output(self, adb_args, stdout_file=None, log_output=True):
        """Like run_and_return_output(), but exit the process on failure."""
        result, stdoutdata = self.run_and_return_output(adb_args, stdout_file, log_output)
        if not result:
            log_exit('run "adb %s" failed' % adb_args)
        return stdoutdata

    def _unroot(self):
        """If adbd currently runs as root, restart it unrooted."""
        result, stdoutdata = self.run_and_return_output(['shell', 'whoami'])
        if not result:
            return
        if 'root' not in stdoutdata:
            return
        log_info('unroot adb')
        self.run(['unroot'])
        self.run(['wait-for-device'])
        # Give adbd a moment to settle after restarting.
        time.sleep(1)

    def switch_to_root(self):
        """Try to restart adbd as root; return True iff it ends up rooted.

        When enable_switch_to_root is False, unroot instead and return
        False. 'user' builds never allow root, so they return False.
        """
        if not self.enable_switch_to_root:
            self._unroot()
            return False
        result, stdoutdata = self.run_and_return_output(['shell', 'whoami'])
        if not result:
            return False
        if 'root' in stdoutdata:
            return True
        build_type = self.get_property('ro.build.type')
        if build_type == 'user':
            return False
        self.run(['root'])
        # Give adbd a moment to restart before waiting on the device.
        time.sleep(1)
        self.run(['wait-for-device'])
        result, stdoutdata = self.run_and_return_output(['shell', 'whoami'])
        return result and 'root' in stdoutdata

    def get_property(self, name):
        """Return the value of an Android system property, or None on failure."""
        result, stdoutdata = self.run_and_return_output(['shell', 'getprop', name])
        return stdoutdata if result else None

    def set_property(self, name, value):
        """Set an Android system property; return True iff adb succeeded."""
        return self.run(['shell', 'setprop', name, value])

    def get_device_arch(self):
        """Map `uname -m` on the device to one of arm64/arm/x86_64/x86.

        Aborts via log_fatal() on an unrecognized machine string.
        """
        output = self.check_run_and_return_output(['shell', 'uname', '-m'])
        if 'aarch64' in output:
            return 'arm64'
        if 'arm' in output:
            return 'arm'
        if 'x86_64' in output:
            return 'x86_64'
        if '86' in output:
            return 'x86'
        log_fatal('unsupported architecture: %s' % output.strip())

    def get_android_version(self):
        """Return the major Android version as an int (0 when unknown).

        Handles numeric releases ('8.1.0' -> 8) and preview letters, where
        'L' maps to 5 and later letters count up from there.
        """
        build_version = self.get_property('ro.build.version.release')
        android_version = 0
        if build_version:
            if not build_version[0].isdigit():
                c = build_version[0].upper()
                if c.isupper() and c >= 'L':
                    android_version = ord(c) - ord('L') + 5
            else:
                strs = build_version.split('.')
                if strs:
                    android_version = int(strs[0])
        return android_version
|
||||
|
||||
|
||||
def flatten_arg_list(arg_list):
    """Concatenate a sequence of sub-sequences into one flat list.

    A None or empty *arg_list* yields an empty list.
    """
    flattened = []
    for sublist in arg_list or []:
        flattened += sublist
    return flattened
|
||||
|
||||
|
||||
def remove(dir_or_file):
    """Delete a regular file or a directory tree; other paths are ignored.

    Directory removal is best effort (rmtree with ignore_errors=True).
    """
    if os.path.isfile(dir_or_file):
        os.remove(dir_or_file)
        return
    if os.path.isdir(dir_or_file):
        shutil.rmtree(dir_or_file, ignore_errors=True)
|
||||
|
||||
|
||||
def open_report_in_browser(report_path):
    """Open *report_path* in a browser, preferring Chrome when registered.

    Best effort: each strategy falls back to the next on failure, ending
    with webbrowser.open_new_tab().
    """
    if is_darwin():
        # On darwin 10.12.6, webbrowser can't open browser, so try `open` cmd first.
        try:
            subprocess.check_call(['open', report_path])
            return
        except Exception:
            # Was a bare `except:`; narrowed so Ctrl-C still interrupts.
            pass
    import webbrowser
    try:
        # Try to open the report with Chrome
        browser_key = ''
        # NOTE(review): webbrowser._browsers is a private attribute; there is
        # no public API to enumerate registered browsers, so this may break
        # across Python versions.
        for key, _ in webbrowser._browsers.items():
            if 'chrome' in key:
                browser_key = key
        browser = webbrowser.get(browser_key)
        browser.open(report_path, new=0, autoraise=True)
    except Exception:
        # webbrowser.get() doesn't work well on darwin/windows.
        webbrowser.open_new_tab(report_path)
|
||||
|
||||
|
||||
def find_real_dso_path(dso_path_in_record_file, binary_cache_path):
    """ Given the path of a shared library in perf.data, find its real path in the file system.

    Looks in binary_cache_path first (mirroring the device's absolute path),
    then at the path itself. Returns None for unresolvable paths.
    """
    # Robustness: an empty path used to raise IndexError on [0]; treat it as
    # unresolvable instead.
    if not dso_path_in_record_file:
        return None
    # Only absolute device paths can be mapped; '//anon' is an anonymous map.
    if dso_path_in_record_file[0] != '/' or dso_path_in_record_file == '//anon':
        return None
    if binary_cache_path:
        tmp_path = os.path.join(binary_cache_path, dso_path_in_record_file[1:])
        if os.path.isfile(tmp_path):
            return tmp_path
    if os.path.isfile(dso_path_in_record_file):
        return dso_path_in_record_file
    return None
|
||||
|
||||
|
||||
class Addr2Nearestline(object):
    """ Use addr2line to convert (dso_path, func_addr, addr) to (source_file, line) pairs.

    For instructions generated by C++ compilers without a matching statement in source code
    (like stack corruption check, switch optimization, etc.), addr2line can't generate
    line information. However, we want to assign the instruction to the nearest line before
    the instruction (just like objdump -dl). So we use below strategy:
    Instead of finding the exact line of the instruction in an address, we find the nearest
    line to the instruction in an address. If an address doesn't have a line info, we find
    the line info of address - 1. If still no line info, then use address - 2, address - 3,
    etc.

    The implementation steps are as below:
    1. Collect all (dso_path, func_addr, addr) requests before converting. This saves the
       times to call addr2line.
    2. Convert addrs to (source_file, line) pairs for each dso_path as below:
       2.1 Check if the dso_path has .debug_line. If not, omit its conversion.
       2.2 Get arch of the dso_path, and decide the addr_step for it. addr_step is the step we
           change addr each time. For example, since instructions of arm64 are all 4 bytes long,
           addr_step for arm64 can be 4.
       2.3 Use addr2line to find line info for each addr in the dso_path.
       2.4 For each addr without line info, use addr2line to find line info for
           range(addr - addr_step, addr - addr_step * 4 - 1, -addr_step).
       2.5 For each addr without line info, use addr2line to find line info for
           range(addr - addr_step * 5, addr - addr_step * 128 - 1, -addr_step).
           (128 is a guess number. A nested switch statement in
           system/core/demangle/Demangler.cpp has >300 bytes without line info in arm64.)
    """
    class Dso(object):
        """ Info of a dynamic shared library.
            addrs: a map from address to Addr object in this dso.
        """
        def __init__(self):
            self.addrs = {}

    class Addr(object):
        """ Info of an addr request.
            func_addr: start_addr of the function containing addr.
            source_lines: a list of [file_id, line_number] for addr.
                          source_lines[:-1] are all for inlined functions.
        """
        def __init__(self, func_addr):
            self.func_addr = func_addr
            self.source_lines = None

    def __init__(self, ndk_path, binary_cache_path):
        self.addr2line_path = find_tool_path('addr2line', ndk_path)
        if not self.addr2line_path:
            log_exit("Can't find addr2line. Please set ndk path with --ndk-path option.")
        self.readelf = ReadElf(ndk_path)
        self.dso_map = {}  # map from dso_path to Dso.
        self.binary_cache_path = binary_cache_path
        # Saving file names for each addr takes a lot of memory. So we store file ids in Addr,
        # and provide data structures connecting file id and file name here.
        self.file_name_to_id = {}
        self.file_id_to_name = []

    def add_addr(self, dso_path, func_addr, addr):
        """Queue an address for conversion (step 1 of the class docstring)."""
        dso = self.dso_map.get(dso_path)
        if dso is None:
            dso = self.dso_map[dso_path] = self.Dso()
        if addr not in dso.addrs:
            dso.addrs[addr] = self.Addr(func_addr)

    def convert_addrs_to_lines(self):
        """Resolve all queued addresses, one dso at a time."""
        for dso_path in self.dso_map:
            self._convert_addrs_in_one_dso(dso_path, self.dso_map[dso_path])

    def _convert_addrs_in_one_dso(self, dso_path, dso):
        """Resolve the addresses of a single dso (steps 2.1-2.5)."""
        real_path = find_real_dso_path(dso_path, self.binary_cache_path)
        if not real_path:
            if dso_path not in ['//anon', 'unknown', '[kernel.kallsyms]']:
                log_debug("Can't find dso %s" % dso_path)
            return

        if not self._check_debug_line_section(real_path):
            log_debug("file %s doesn't contain .debug_line section." % real_path)
            return

        addr_step = self._get_addr_step(real_path)
        self._collect_line_info(dso, real_path, [0])
        self._collect_line_info(dso, real_path, range(-addr_step, -addr_step * 4 - 1, -addr_step))
        self._collect_line_info(dso, real_path,
                                range(-addr_step * 5, -addr_step * 128 - 1, -addr_step))

    def _check_debug_line_section(self, real_path):
        """Return True iff the elf file carries a .debug_line section."""
        return '.debug_line' in self.readelf.get_sections(real_path)

    def _get_addr_step(self, real_path):
        """Smallest instruction size for the dso's arch (see step 2.2)."""
        arch = self.readelf.get_arch(real_path)
        if arch == 'arm64':
            return 4
        if arch == 'arm':
            # Thumb instructions can be 2 bytes long.
            return 2
        return 1

    def _collect_line_info(self, dso, real_path, addr_shifts):
        """ Use addr2line to get line info in a dso, with given addr shifts. """
        # 1. Collect addrs to send to addr2line.
        addr_set = set()
        for addr in dso.addrs:
            addr_obj = dso.addrs[addr]
            if addr_obj.source_lines:  # already has source line, no need to search.
                continue
            for shift in addr_shifts:
                # The addr after shift shouldn't change to another function.
                shifted_addr = max(addr + shift, addr_obj.func_addr)
                addr_set.add(shifted_addr)
                if shifted_addr == addr_obj.func_addr:
                    break
        if not addr_set:
            return
        addr_request = '\n'.join(['%x' % addr for addr in sorted(addr_set)])

        # 2. Use addr2line to collect line info.
        try:
            subproc = subprocess.Popen([self.addr2line_path, '-ai', '-e', real_path],
                                       stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdoutdata, _) = subproc.communicate(str_to_bytes(addr_request))
            stdoutdata = bytes_to_str(stdoutdata)
        except:
            return
        addr_map = {}
        cur_line_list = None
        for line in stdoutdata.strip().split('\n'):
            if line[:2] == '0x':
                # a new address
                cur_line_list = addr_map[int(line, 16)] = []
            else:
                # a file:line.
                if cur_line_list is None:
                    continue
                # Handle lines like "C:\Users\...\file:32".
                items = line.rsplit(':', 1)
                if len(items) != 2:
                    continue
                if '?' in line:
                    # if ? in line, it doesn't have a valid line info.
                    # An addr can have a list of (file, line), when the addr belongs to an inlined
                    # function. Sometimes only part of the list has ? mark. In this case, we think
                    # the line info is valid if the first line doesn't have ? mark.
                    if not cur_line_list:
                        cur_line_list = None
                    continue
                (file_path, line_number) = items
                line_number = line_number.split()[0]  # Remove comments after line number
                try:
                    line_number = int(line_number)
                except ValueError:
                    continue
                file_id = self._get_file_id(file_path)
                cur_line_list.append((file_id, line_number))

        # 3. Fill line info in dso.addrs.
        for addr in dso.addrs:
            addr_obj = dso.addrs[addr]
            if addr_obj.source_lines:
                continue
            for shift in addr_shifts:
                shifted_addr = max(addr + shift, addr_obj.func_addr)
                lines = addr_map.get(shifted_addr)
                if lines:
                    addr_obj.source_lines = lines
                    break
                if shifted_addr == addr_obj.func_addr:
                    break

    def _get_file_id(self, file_path):
        """Intern *file_path* and return its numeric id (see __init__)."""
        file_id = self.file_name_to_id.get(file_path)
        if file_id is None:
            file_id = self.file_name_to_id[file_path] = len(self.file_id_to_name)
            self.file_id_to_name.append(file_path)
        return file_id

    def get_dso(self, dso_path):
        """Return the Dso object queued for *dso_path*, or None."""
        return self.dso_map.get(dso_path)

    def get_addr_source(self, dso, addr):
        """Return [(file_name, line), ...] for a resolved addr, or None."""
        source = dso.addrs[addr].source_lines
        if source is None:
            return None
        return [(self.file_id_to_name[file_id], line) for (file_id, line) in source]
|
||||
|
||||
|
||||
class Objdump(object):
    """ A wrapper of objdump to disassemble code. """
    def __init__(self, ndk_path, binary_cache_path):
        self.ndk_path = ndk_path
        self.binary_cache_path = binary_cache_path
        self.readelf = ReadElf(ndk_path)
        self.objdump_paths = {}  # cache: arch -> objdump path

    def disassemble_code(self, dso_path, start_addr, addr_len):
        """ Disassemble [start_addr, start_addr + addr_len] of dso_path.
            Return a list of pair (disassemble_code_line, addr).

            Returns None when the dso can't be located, its arch is unknown,
            or objdump produces no output.
        """
        # 1. Find real path.
        real_path = find_real_dso_path(dso_path, self.binary_cache_path)
        if real_path is None:
            return None

        # 2. Get path of objdump.
        arch = self.readelf.get_arch(real_path)
        if arch == 'unknown':
            return None
        objdump_path = self.objdump_paths.get(arch)
        if not objdump_path:
            objdump_path = find_tool_path('objdump', self.ndk_path, arch)
            if not objdump_path:
                log_exit("Can't find objdump. Please set ndk path with --ndk_path option.")
            self.objdump_paths[arch] = objdump_path

        # 3. Run objdump.
        args = [objdump_path, '-dlC', '--no-show-raw-insn',
                '--start-address=0x%x' % start_addr,
                '--stop-address=0x%x' % (start_addr + addr_len),
                real_path]
        try:
            subproc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            (stdoutdata, _) = subproc.communicate()
            stdoutdata = bytes_to_str(stdoutdata)
        except:
            return None

        if not stdoutdata:
            return None
        result = []
        for line in stdoutdata.split('\n'):
            line = line.rstrip()  # Remove '\r' on Windows.
            items = line.split(':', 1)
            try:
                addr = int(items[0], 16)
            except ValueError:
                # Lines without a leading hex address (labels, interleaved
                # source) are kept with addr 0.
                addr = 0
            result.append((line, addr))
        return result
|
||||
|
||||
|
||||
class ReadElf(object):
    """ A wrapper of readelf. """
    def __init__(self, ndk_path):
        self.readelf_path = find_tool_path('readelf', ndk_path)
        if not self.readelf_path:
            log_exit("Can't find readelf. Please set ndk path with --ndk_path option.")

    def get_arch(self, elf_file_path):
        """ Get arch of an elf file.

        Returns one of 'arm64', 'arm', 'x86_64', 'x86', or 'unknown'.
        """
        try:
            output = subprocess.check_output([self.readelf_path, '-h', elf_file_path])
            # Bug fix: check_output returns bytes on Python 3, and
            # bytes.find(str) raises TypeError. Decode first, matching the
            # other methods of this class.
            output = bytes_to_str(output)
            if output.find('AArch64') != -1:
                return 'arm64'
            if output.find('ARM') != -1:
                return 'arm'
            if output.find('X86-64') != -1:
                return 'x86_64'
            if output.find('80386') != -1:
                return 'x86'
        except subprocess.CalledProcessError:
            pass
        return 'unknown'

    def get_build_id(self, elf_file_path):
        """ Get build id of an elf file.

        The id is normalized to '0x' + exactly 40 hex chars (zero-padded or
        truncated); returns "" when no build id is found.
        """
        try:
            output = subprocess.check_output([self.readelf_path, '-n', elf_file_path])
            output = bytes_to_str(output)
            result = re.search(r'Build ID:\s*(\S+)', output)
            if result:
                build_id = result.group(1)
                if len(build_id) < 40:
                    build_id += '0' * (40 - len(build_id))
                else:
                    build_id = build_id[:40]
                build_id = '0x' + build_id
                return build_id
        except subprocess.CalledProcessError:
            pass
        return ""

    def get_sections(self, elf_file_path):
        """ Get sections of an elf file.

        Returns a list of section names, empty on readelf failure.
        """
        section_names = []
        try:
            output = subprocess.check_output([self.readelf_path, '-SW', elf_file_path])
            output = bytes_to_str(output)
            for line in output.split('\n'):
                # Parse line like:" [ 1] .note.android.ident NOTE 0000000000400190 ...".
                result = re.search(r'^\s+\[\s*\d+\]\s(.+?)\s', line)
                if result:
                    section_name = result.group(1).strip()
                    if section_name:
                        section_names.append(section_name)
        except subprocess.CalledProcessError:
            pass
        return section_names
|
||||
|
||||
def extant_dir(arg):
    """ArgumentParser type that only accepts extant directories.

    Args:
        arg: The string argument given on the command line.
    Returns: The argument as a realpath.
    Raises:
        argparse.ArgumentTypeError: The given path isn't a directory.
    """
    resolved = os.path.realpath(arg)
    if os.path.isdir(resolved):
        return resolved
    import argparse
    raise argparse.ArgumentTypeError('{} is not a directory.'.format(resolved))
|
||||
|
||||
# Force verbose logging as a module-import side effect; disable_debug_log()
# raises the threshold back to WARNING for callers that want quiet output.
logging.getLogger().setLevel(logging.DEBUG)
|
|
@ -1,3 +1,3 @@
|
|||
git clone https://android.googlesource.com/platform/prebuilts/simpleperf
git reset --hard 311a9d2cd27841498fc90a0b26a755deb47e7ebd
cp -r report_html.* simpleperf_report_lib.py utils.py inferno lib ~/one/external/simpleperf/
|
Loading…
Reference in New Issue