Staging: lttng: remove from the drivers/staging/ tree

The "proper" way to do this is to work with the existing in-kernel
tracing subsystem and work to get the missing features that are in lttng
into those subsystems.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman 2011-12-08 15:25:56 -08:00
parent 03cf152646
commit 877a0893e3
134 changed files with 0 additions and 25927 deletions

@@ -66,8 +66,6 @@ source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"
source "drivers/staging/lttng/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/staging/octeon/Kconfig"

@@ -25,7 +25,6 @@ obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
-obj-$(CONFIG_LTTNG) += lttng/
obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/
obj-$(CONFIG_USB_SERIAL_QUATECH_USB2) += quatech_usb2/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/

@@ -1,35 +0,0 @@
config LTTNG
tristate "LTTng kernel tracer"
depends on TRACEPOINTS
help
The LTTng 2.0 Tracer Toolchain allows integrated kernel and
user-space tracing from a single user interface: the "lttng"
command. See http://lttng.org website for the "lttng-tools"
user-space tracer control tools package and the "babeltrace"
package for conversion of trace data to a human-readable
format.
LTTng features:
- System-wide tracing across kernel, libraries and
applications,
- Tracepoints, detailed syscall tracing (fast strace replacement),
Function tracer, CPU Performance Monitoring Unit (PMU) counters
and kprobes support,
- Have the ability to attach "context" information to events in the
trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc). All
the extra information fields to be collected with events are
optional, specified on a per-tracing-session basis (except for
timestamp and event id, which are mandatory).
- Precise and fast clock sources with near cycle-level
timestamps,
- Efficient trace data transport:
- Compact Binary format with CTF,
- Per-core buffers ensure scalability,
- Fast-paths in caller context, amortized synchronization,
- Zero-copy using splice and mmap system calls, over disk,
network or consumed in-place,
- Multiple concurrent tracing sessions are supported,
- Designed to meet hard real-time constraints,
- Supports live streaming of the trace data,
- Produces CTF (Common Trace Format) natively (see
http://www.efficios.com/ctf).

@@ -1,27 +0,0 @@
LTTng modules licensing
Mathieu Desnoyers
June 2, 2011
* LGPLv2.1/GPLv2 dual-license
The files contained within this package are licensed under
LGPLv2.1/GPLv2 dual-license (see lgpl-2.1.txt and gpl-2.0.txt for
details), except for files identified by the following sections.
* GPLv2 license
These files are licensed exclusively under the GPLv2 license. See
gpl-2.0.txt for details.
lib/ringbuffer/ring_buffer_splice.c
lib/ringbuffer/ring_buffer_mmap.c
instrumentation/events/mainline/*.h
instrumentation/events/lttng-modules/*.h
* MIT-style license
These files are licensed under an MIT-style license:
lib/prio_heap/lttng_prio_heap.h
lib/prio_heap/lttng_prio_heap.c
lib/bitfield.h

@@ -1,33 +0,0 @@
#
# Makefile for the LTTng modules.
#
obj-m += ltt-ring-buffer-client-discard.o
obj-m += ltt-ring-buffer-client-overwrite.o
obj-m += ltt-ring-buffer-metadata-client.o
obj-m += ltt-ring-buffer-client-mmap-discard.o
obj-m += ltt-ring-buffer-client-mmap-overwrite.o
obj-m += ltt-ring-buffer-metadata-mmap-client.o
obj-m += ltt-relay.o
ltt-relay-objs := ltt-events.o ltt-debugfs-abi.o \
ltt-probes.o ltt-context.o \
lttng-context-pid.o lttng-context-procname.o \
lttng-context-prio.o lttng-context-nice.o \
lttng-context-vpid.o lttng-context-tid.o \
lttng-context-vtid.o lttng-context-ppid.o \
lttng-context-vppid.o lttng-calibrate.o
ifneq ($(CONFIG_HAVE_SYSCALL_TRACEPOINTS),)
ltt-relay-objs += lttng-syscalls.o
endif
ifneq ($(CONFIG_PERF_EVENTS),)
ltt-relay-objs += $(shell \
if [ $(VERSION) -ge 3 \
-o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 33 \) ] ; then \
echo "lttng-context-perf-counters.o" ; fi;)
endif
obj-m += probes/
obj-m += lib/

@@ -1,48 +0,0 @@
LTTng 2.0 modules
Mathieu Desnoyers
November 1st, 2011
The LTTng 2.0 kernel modules are currently part of the Linux kernel
staging tree. Features (new since LTTng 0.x):
- Produces CTF (Common Trace Format) natively,
(http://www.efficios.com/ctf)
- Tracepoints, Function tracer, CPU Performance Monitoring Unit (PMU)
counters, kprobes, and kretprobes support,
- Integrated interface for both kernel and userspace tracing,
- Have the ability to attach "context" information to events in the
trace (e.g. any PMU counter, pid, ppid, tid, comm name, etc).
All the extra information fields to be collected with events are
optional, specified on a per-tracing-session basis (except for
timestamp and event id, which are mandatory).
To build and install, you need to select "Staging" modules, and the
LTTng kernel tracer.
Use lttng-tools to control the tracer. LTTng tools should automatically
load the kernel modules when needed. Use Babeltrace to print traces as a
human-readable text log. These tools are available at the following URL:
http://lttng.org/lttng2.0
Please note that the LTTng-UST 2.0 (user-space tracing counterpart of
LTTng 2.0) is now ready to be used, but still only available from the
git repository.
So far, it has been tested on vanilla Linux kernels 2.6.38, 2.6.39 and
3.0 (on x86 32/64-bit, and powerpc 32-bit at the moment, build tested on
ARM). It should work fine with newer kernels and other architectures,
but expect build issues with kernels older than 2.6.36. The clock source
currently used is the standard gettimeofday (slower, less scalable and
less precise than the LTTng 0.x clocks). Support for LTTng 0.x clocks
will be added back soon into LTTng 2.0. Please note that lttng-modules
2.0 can build on a Linux kernel patched with the LTTng 0.x patchset, but
the lttng-modules 2.0 replace the lttng-modules 0.x, so both tracers
cannot be installed at the same time for a given kernel version.
* Note about Perf PMU counters support
Each PMU counter has its zero value set when it is attached to a context with
add-context. Therefore, it is normal that the same counters attached to both the
stream context and event context show different values for a given event; what
matters is that they increment at the same rate.
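A minimal standalone C sketch of this behaviour (made-up numbers, purely
illustrative; not code from the LTTng modules): each context records the
counter's raw value when it is attached and afterwards reports raw minus
that baseline, so the absolute values differ while the per-event deltas
match.

#include <stdio.h>

/* Illustration only: model a free-running PMU counter as a raw value;
 * each context keeps the raw value observed at add-context time and
 * reports raw - baseline afterwards. */
struct counter_view {
	unsigned long long baseline;
};

static unsigned long long view_read(const struct counter_view *v,
				    unsigned long long raw)
{
	return raw - v->baseline;
}

int main(void)
{
	unsigned long long raw = 1000;		/* counter value at trace start */
	struct counter_view stream = { .baseline = raw };	/* stream context attached */

	raw += 500;				/* counter keeps running */
	struct counter_view event = { .baseline = raw };	/* event context attached later */

	raw += 300;				/* first event */
	printf("event 1: stream=%llu event=%llu\n",
	       view_read(&stream, raw), view_read(&event, raw));

	raw += 200;				/* second event */
	printf("event 2: stream=%llu event=%llu\n",
	       view_read(&stream, raw), view_read(&event, raw));

	/* Prints 800/300 then 1000/500: different absolute values, but both
	 * views advanced by the same 200 between the two events. */
	return 0;
}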

@@ -1,131 +0,0 @@
Please contact Mathieu Desnoyers <mathieu.desnoyers@efficios.com> for
questions about this TODO list. The "Cleanup/Testing" section would be
good to go through before integration into mainline. The "Features"
section is a wish list of features to complete before releasing the
"LTTng 2.0" final version, but are not required to have LTTng working.
These features are mostly performance enhancements and instrumentation
enhancements.
TODO:
A) Cleanup/Testing
1) Remove debugfs "lttng" file (keep only procfs "lttng" file).
The rationale for this is that this file is needed for
user-level tracing support (LTTng-UST 2.0) intended to be
used on production systems, and therefore should be present as
part of a "usually mounted" filesystem rather than a debug
filesystem.
2) Cleanup wrappers. The drivers/staging/lttng/wrapper directory
contains various wrapper headers that use kallsyms lookups to
work around some missing EXPORT_SYMBOL_GPL() in the mainline
kernel. Ideally, those few symbols should become exported to
modules by the kernel.
3) Test lib ring buffer snapshot feature.
When working on the lttngtop project, Julien Desfossez
reported that he needed to push the consumer position
forward explicitly with lib_ring_buffer_put_next_subbuf.
This means that although the usual case of pairs of
lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf
work fine, there is probably a problem that needs to be
investigated in
lib_ring_buffer_get_subbuf/lib_ring_buffer_put_subbuf, which
depend on the producer to push the reader position.
Contact: Julien Desfossez <julien.desfossez@polymtl.ca>
B) Features
1) Integration of the LTTng 0.x trace clocks into
LTTng 2.0.
Currently using mainline kernel monotonic clock. NMIs can
therefore not be traced, and this causes a significant
performance degradation compared to the LTTng 0.x trace
clocks. Imply the creation of drivers/staging/lttng/arch to
contain the arch-specific clock support files.
* Dependency: addition of clock descriptions to CTF.
See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
for the LTTng 0.x git tree.
2) Port OMAP3 LTTng trace clocks to x86 to support systems
without constant TSC.
* Dependency: (B.1)
See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
for the LTTng 0.x git tree.
3) Implement mmap operation on an anonymous file created by a
LTTNG_KERNEL_CLOCK ioctl, exporting data for
synchronized kernel and user-level LTTng trace clocks:
with:
- shared per-cpu data,
- read seqlock.
The content exported by this shared memory area will be
arch-specific.
* Dependency: (B.1) && (B.2)
See: http://git.lttng.org/?p=linux-2.6-lttng.git;a=summary
for the LTTng 0.x git tree, which has vDSO support for
LTTng trace clock on the x86 architecture.
3) Integrate the "statedump" module from LTTng 0.x into LTTng
2.0.
* Dependency: addition of "dynamic enumerations" type to CTF.
See: http://git.lttng.org/?p=lttng-modules.git;a=shortlog;h=refs/heads/v0.19-stable
ltt-statedump.c
4) Generate system call TRACE_EVENT headers for all
architectures (currently done: x86 32/64).
5) Define "unknown" system calls into instrumentation/syscalls
override files / or do SYSCALL_DEFINE improvements to
mainline kernel to allow automatic generation of these
missing system call descriptions.
6) Create missing tracepoint event headers files into
instrumentation/events from headers located in
include/trace/events/. Choice: either do as currently done,
and copy those headers locally into the lttng driver and
perform the modifications locally, or push TRACE_EVENT API
modification into mainline headers, which would require
collaboration from Ftrace/Perf maintainers.
7) Poll: implement a poll and/or epoll exclusive wakeup scheme,
which contradicts POSIX, but protects multiple consumer
threads from the thundering herd effect.
8) Re-integrate sample modules from libringbuffer into
lttng driver. Those modules can be used as example of how to
use libringbuffer in other contexts than LTTng, and are
useful to perform benchmarks of the ringbuffer library.
See: http://www.efficios.com/ringbuffer
9) NOHZ support for lib ring buffer. NOHZ infrastructure in the
Linux kernel does not support notifier chains, which does
not let LTTng play nicely with low power consumption setups
for flight recorder (overwrite mode) live traces. One way to
allow integration between NOHZ and LTTng would be to add
support for such notifiers into NOHZ kernel infrastructure.
10) Turn drivers/staging/lttng/ltt-probes.c probe_list into a
hash table. Turns O(n^2) trace system registration (cost
for n systems) into O(n), i.e. O(1) per system. A standalone
sketch of such a lookup table follows this TODO.
11) drivers/staging/lttng/probes/lttng-ftrace.c:
LTTng currently uses kretprobes for per-function tracing,
not the function tracer. So lttng-ftrace.c should be used
for "all" function tracing.
12) drivers/staging/lttng/probes/lttng-types.c:
This is a currently unused placeholder to export entire C
type declarations into the trace metadata, e.g. for support
of describing the layout of structures/enumeration mapping
along with syscall entry events. The design of this support
will likely change though, and become integrated with the
TRACE_EVENT support within lttng, by adding new macros, and
support for generation of metadata from these macros, to
allow description of those compound types/enumerations.
Please send patches
To: Greg Kroah-Hartman <greg@kroah.com>
To: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
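Referring back to item 10 above (turning probe_list into a hash table): a
minimal userspace C sketch of a name-keyed, chained hash table that makes
each registration and lookup O(1) on average instead of a linear list
scan. Type and function names here are hypothetical, not those used by
ltt-probes.c.

#include <stdio.h>
#include <string.h>

#define PROBE_HASH_BITS	6
#define PROBE_HASH_SIZE	(1 << PROBE_HASH_BITS)

struct probe {
	const char *name;
	struct probe *next;	/* chain within one bucket */
};

static struct probe *probe_table[PROBE_HASH_SIZE];

/* Simple string hash (djb2); kernel code could use the kernel's own
 * hash helpers instead. */
static unsigned int probe_hash(const char *name)
{
	unsigned int h = 5381;

	while (*name)
		h = h * 33 + (unsigned char)*name++;
	return h & (PROBE_HASH_SIZE - 1);
}

static void probe_register(struct probe *p)
{
	unsigned int bucket = probe_hash(p->name);

	p->next = probe_table[bucket];
	probe_table[bucket] = p;
}

static struct probe *probe_find(const char *name)
{
	struct probe *p;

	for (p = probe_table[probe_hash(name)]; p; p = p->next)
		if (!strcmp(p->name, name))
			return p;
	return NULL;
}

int main(void)
{
	struct probe sched = { .name = "sched_switch" };
	struct probe irq = { .name = "irq_handler_entry" };

	probe_register(&sched);
	probe_register(&irq);
	printf("found: %s\n", probe_find("irq_handler_entry")->name);
	return 0;
}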

@@ -1,7 +0,0 @@
The workflow for updating patches from a newer kernel:
Diff mainline/ and lttng-module/ directories.
Pull the new headers from mainline kernel to mainline/.
Copy them into lttng-modules.
Apply diff. Fix conflicts.

@@ -1,626 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block
#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#ifndef _TRACE_BLOCK_DEF_
#define _TRACE_BLOCK_DEF_
#define __blk_dump_cmd(cmd, len) "<unknown>"
enum {
RWBS_FLAG_WRITE = (1 << 0),
RWBS_FLAG_DISCARD = (1 << 1),
RWBS_FLAG_READ = (1 << 2),
RWBS_FLAG_RAHEAD = (1 << 3),
RWBS_FLAG_SYNC = (1 << 4),
RWBS_FLAG_META = (1 << 5),
RWBS_FLAG_SECURE = (1 << 6),
};
#endif /* _TRACE_BLOCK_DEF_ */
#define __print_rwbs_flags(rwbs) \
__print_flags(rwbs, "", \
{ RWBS_FLAG_WRITE, "W" }, \
{ RWBS_FLAG_DISCARD, "D" }, \
{ RWBS_FLAG_READ, "R" }, \
{ RWBS_FLAG_RAHEAD, "A" }, \
{ RWBS_FLAG_SYNC, "S" }, \
{ RWBS_FLAG_META, "M" }, \
{ RWBS_FLAG_SECURE, "E" })
#define blk_fill_rwbs(rwbs, rw, bytes) \
tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
( (bytes) ? RWBS_FLAG_READ : \
( 0 )))) \
| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
| ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
DECLARE_EVENT_CLASS(block_rq_with_error,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int, errors )
__field( unsigned int, rwbs )
__dynamic_array_hex( unsigned char, cmd,
(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
rq->cmd_len : 0)
),
TP_fast_assign(
tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_pos(rq))
tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_sectors(rq))
tp_assign(errors, rq->errors)
blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
rq->cmd : NULL);
),
TP_printk("%d,%d %s (%s) %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
__blk_dump_cmd(__get_dynamic_array(cmd),
__get_dynamic_array_len(cmd)),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->errors)
)
/**
* block_rq_abort - abort block operation request
* @q: queue containing the block operation request
* @rq: block IO operation request
*
* Called immediately after pending block IO operation request @rq in
* queue @q is aborted. The fields in the operation request @rq
* can be examined to determine which device and sectors the pending
* operation would access.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_abort,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
)
/**
* block_rq_requeue - place block IO request back on a queue
* @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
* @q. For some reason the request was not completed and needs to be
* put back in the queue.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
)
/**
* block_rq_complete - block IO operation completed by device driver
* @q: queue containing the block operation request
* @rq: block operations request
*
* The block_rq_complete tracepoint event indicates that some portion
* of operation request has been completed by the device driver. If
* the @rq->bio is %NULL, then there is absolutely no additional work to
* do for the request. If @rq->bio is non-NULL then there is
* additional work required to complete the request.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_complete,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
)
DECLARE_EVENT_CLASS(block_rq,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
__field( unsigned int, rwbs )
__array_text( char, comm, TASK_COMM_LEN )
__dynamic_array_hex( unsigned char, cmd,
(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
rq->cmd_len : 0)
),
TP_fast_assign(
tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_pos(rq))
tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_sectors(rq))
tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
blk_rq_bytes(rq) : 0)
blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
rq->cmd : NULL);
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
__entry->bytes,
__blk_dump_cmd(__get_dynamic_array(cmd),
__get_dynamic_array_len(cmd)),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
)
/**
* block_rq_insert - insert block operation request into queue
* @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
* into queue @q. The fields in the operation request @rq struct can
* be examined to determine which device and sectors the pending
* operation would access.
*/
DEFINE_EVENT(block_rq, block_rq_insert,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
)
/**
* block_rq_issue - issue pending block IO request operation to device driver
* @q: queue holding operation
* @rq: block IO operation request
*
* Called when block operation request @rq from queue @q is sent to a
* device driver for processing.
*/
DEFINE_EVENT(block_rq, block_rq_issue,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
)
/**
* block_bio_bounce - used bounce buffer when processing block operation
* @q: queue holding the block operation
* @bio: block operation
*
* A bounce buffer was used to handle the block operation @bio in @q.
* This occurs when hardware limitations prevent a direct transfer of
* data between the @bio data memory area and the IO device. Use of a
* bounce buffer requires extra copying of data and decreases
* performance.
*/
TRACE_EVENT(block_bio_bounce,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, rwbs )
__array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
tp_assign(dev, bio->bi_bdev ?
bio->bi_bdev->bd_dev : 0)
tp_assign(sector, bio->bi_sector)
tp_assign(nr_sector, bio->bi_size >> 9)
blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
)
/**
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
* @error: io error value
*
* This tracepoint indicates there is no further work to do on this
* block IO operation @bio.
*/
TRACE_EVENT(block_bio_complete,
TP_PROTO(struct request_queue *q, struct bio *bio, int error),
TP_ARGS(q, bio, error),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned, nr_sector )
__field( int, error )
__field( unsigned int, rwbs )
),
TP_fast_assign(
tp_assign(dev, bio->bi_bdev->bd_dev)
tp_assign(sector, bio->bi_sector)
tp_assign(nr_sector, bio->bi_size >> 9)
tp_assign(error, error)
blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
),
TP_printk("%d,%d %s %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->error)
)
DECLARE_EVENT_CLASS(block_bio,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, rwbs )
__array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
tp_assign(dev, bio->bi_bdev->bd_dev)
tp_assign(sector, bio->bi_sector)
tp_assign(nr_sector, bio->bi_size >> 9)
blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
)
/**
* block_bio_backmerge - merging block operation to the end of an existing operation
* @q: queue holding operation
* @bio: new block operation to merge
*
* Merging block request @bio to the end of an existing block request
* in queue @q.
*/
DEFINE_EVENT(block_bio, block_bio_backmerge,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio)
)
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
* @q: queue holding operation
* @bio: new block operation to merge
*
* Merging block IO operation @bio to the beginning of an existing block
* operation in queue @q.
*/
DEFINE_EVENT(block_bio, block_bio_frontmerge,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio)
)
/**
* block_bio_queue - putting new block IO operation in queue
* @q: queue holding operation
* @bio: new block operation
*
* About to place the block IO operation @bio into queue @q.
*/
DEFINE_EVENT(block_bio, block_bio_queue,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio)
)
DECLARE_EVENT_CLASS(block_get_rq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, rwbs )
__array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
tp_assign(sector, bio ? bio->bi_sector : 0)
tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
bio ? bio->bi_size >> 9 : 0)
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
)
/**
* block_getrq - get a free request entry in queue for block IO operations
* @q: queue for operations
* @bio: pending block IO operation
* @rw: low bit indicates a read (%0) or a write (%1)
*
* A request struct for queue @q has been allocated to handle the
* block IO operation @bio.
*/
DEFINE_EVENT(block_get_rq, block_getrq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw)
)
/**
* block_sleeprq - waiting to get a free request entry in queue for block IO operation
* @q: queue for operation
* @bio: pending block IO operation
* @rw: low bit indicates a read (%0) or a write (%1)
*
* In the case where a request struct cannot be provided for queue @q
* the process needs to wait for a request struct to become
* available. This tracepoint event is generated each time the
* process goes to sleep waiting for a request struct to become
* available.
*/
DEFINE_EVENT(block_get_rq, block_sleeprq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw)
)
/**
* block_plug - keep operations requests in request queue
* @q: request queue to plug
*
* Plug the request queue @q. Do not allow block operation requests
* to be sent to the device driver. Instead, accumulate requests in
* the queue to improve throughput performance of the block device.
*/
TRACE_EVENT(block_plug,
TP_PROTO(struct request_queue *q),
TP_ARGS(q),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("[%s]", __entry->comm)
)
DECLARE_EVENT_CLASS(block_unplug,
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
TP_ARGS(q, depth, explicit),
TP_STRUCT__entry(
__field( int, nr_rq )
__array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
tp_assign(nr_rq, depth)
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
)
/**
* block_unplug - release of operations requests in request queue
* @q: request queue to unplug
* @depth: number of requests just added to the queue
* @explicit: whether this was an explicit unplug, or one from schedule()
*
* Unplug request queue @q because device driver is scheduled to work
* on elements in the request queue.
*/
DEFINE_EVENT(block_unplug, block_unplug,
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
TP_ARGS(q, depth, explicit)
)
/**
* block_split - split a single bio struct into two bio structs
* @q: queue containing the bio
* @bio: block operation being split
* @new_sector: The starting sector for the new bio
*
* The bio request @bio in request queue @q needs to be split into two
* bio requests. The newly created @bio request starts at
* @new_sector. This split may be required due to hardware limitation
* such as operation crossing device boundaries in a RAID system.
*/
TRACE_EVENT(block_split,
TP_PROTO(struct request_queue *q, struct bio *bio,
unsigned int new_sector),
TP_ARGS(q, bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( sector_t, new_sector )
__field( unsigned int, rwbs )
__array_text( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
tp_assign(dev, bio->bi_bdev->bd_dev)
tp_assign(sector, bio->bi_sector)
tp_assign(new_sector, new_sector)
blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
),
TP_printk("%d,%d %s %llu / %llu [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
(unsigned long long)__entry->new_sector,
__entry->comm)
)
/**
* block_bio_remap - map request for a logical device to the raw device
* @q: queue holding the operation
* @bio: revised operation
* @dev: device for the operation
* @from: original sector for the operation
*
* An operation for a logical device has been mapped to the
* raw block device.
*/
TRACE_EVENT(block_bio_remap,
TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
sector_t from),
TP_ARGS(q, bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
__field( unsigned int, rwbs )
),
TP_fast_assign(
tp_assign(dev, bio->bi_bdev->bd_dev)
tp_assign(sector, bio->bi_sector)
tp_assign(nr_sector, bio->bi_size >> 9)
tp_assign(old_dev, dev)
tp_assign(old_sector, from)
blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
(unsigned long long)__entry->old_sector)
)
/**
* block_rq_remap - map request for a block operation request
* @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
*
* The block operation request @rq in @q has been remapped. The block
* operation request @rq holds the current information and @from holds
* the original sector.
*/
TRACE_EVENT(block_rq_remap,
TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
sector_t from),
TP_ARGS(q, rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
__field( unsigned int, rwbs )
),
TP_fast_assign(
tp_assign(dev, disk_devt(rq->rq_disk))
tp_assign(sector, blk_rq_pos(rq))
tp_assign(nr_sector, blk_rq_sectors(rq))
tp_assign(old_dev, dev)
tp_assign(old_sector, from)
blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__print_rwbs_flags(__entry->rwbs),
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
(unsigned long long)__entry->old_sector)
)
#undef __print_rwbs_flags
#undef blk_fill_rwbs
#endif /* _TRACE_BLOCK_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

@@ -1,155 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq
#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_H
#include <linux/tracepoint.h>
#ifndef _TRACE_IRQ_DEF_
#define _TRACE_IRQ_DEF_
struct irqaction;
struct softirq_action;
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI), \
softirq_name(TIMER), \
softirq_name(NET_TX), \
softirq_name(NET_RX), \
softirq_name(BLOCK), \
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
#endif /* _TRACE_IRQ_DEF_ */
/**
* irq_handler_entry - called immediately before the irq action handler
* @irq: irq number
* @action: pointer to struct irqaction
*
* The struct irqaction pointed to by @action contains various
* information about the handler, including the device name,
* @action->name, and the device id, @action->dev_id. When used in
* conjunction with the irq_handler_exit tracepoint, we can figure
* out irq handler latencies.
*/
TRACE_EVENT(irq_handler_entry,
TP_PROTO(int irq, struct irqaction *action),
TP_ARGS(irq, action),
TP_STRUCT__entry(
__field( int, irq )
__string( name, action->name )
),
TP_fast_assign(
tp_assign(irq, irq)
tp_strcpy(name, action->name)
),
TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
)
/**
* irq_handler_exit - called immediately after the irq action handler returns
* @irq: irq number
* @action: pointer to struct irqaction
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
@action->handler successfully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
TRACE_EVENT(irq_handler_exit,
TP_PROTO(int irq, struct irqaction *action, int ret),
TP_ARGS(irq, action, ret),
TP_STRUCT__entry(
__field( int, irq )
__field( int, ret )
),
TP_fast_assign(
tp_assign(irq, irq)
tp_assign(ret, ret)
),
TP_printk("irq=%d ret=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
)
DECLARE_EVENT_CLASS(softirq,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr),
TP_STRUCT__entry(
__field( unsigned int, vec )
),
TP_fast_assign(
tp_assign(vec, vec_nr)
),
TP_printk("vec=%u [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
)
/**
* softirq_entry - called immediately before the softirq handler
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_entry,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
)
/**
* softirq_exit - called immediately after the softirq handler returns
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_exit,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
)
/**
* softirq_raise - called immediately when a softirq is raised
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
* we can determine the softirq raise to run latency.
*/
DEFINE_EVENT(softirq, softirq_raise,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
)
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

@@ -1,312 +0,0 @@
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
#define kvm_trace_exit_reason \
ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
TP_ARGS(reason, errno),
TP_STRUCT__entry(
__field( __u32, reason )
__field( int, errno )
),
TP_fast_assign(
tp_assign(reason, reason)
tp_assign(errno, errno)
),
TP_printk("reason %s (%d)",
__entry->errno < 0 ?
(__entry->errno == -EINTR ? "restart" : "error") :
__print_symbolic(__entry->reason, kvm_trace_exit_reason),
__entry->errno < 0 ? -__entry->errno : __entry->reason)
)
#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
TP_STRUCT__entry(
__field( unsigned int, gsi )
__field( int, level )
__field( int, irq_source_id )
),
TP_fast_assign(
tp_assign(gsi, gsi)
tp_assign(level, level)
tp_assign(irq_source_id, irq_source_id)
),
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
)
#define kvm_deliver_mode \
{0x0, "Fixed"}, \
{0x1, "LowPrio"}, \
{0x2, "SMI"}, \
{0x3, "Res3"}, \
{0x4, "NMI"}, \
{0x5, "INIT"}, \
{0x6, "SIPI"}, \
{0x7, "ExtINT"}
TRACE_EVENT(kvm_ioapic_set_irq,
TP_PROTO(__u64 e, int pin, bool coalesced),
TP_ARGS(e, pin, coalesced),
TP_STRUCT__entry(
__field( __u64, e )
__field( int, pin )
__field( bool, coalesced )
),
TP_fast_assign(
tp_assign(e, e)
tp_assign(pin, pin)
tp_assign(coalesced, coalesced)
),
TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
(__entry->e & (1<<15)) ? "level" : "edge",
(__entry->e & (1<<16)) ? "|masked" : "",
__entry->coalesced ? " (coalesced)" : "")
)
TRACE_EVENT(kvm_msi_set_irq,
TP_PROTO(__u64 address, __u64 data),
TP_ARGS(address, data),
TP_STRUCT__entry(
__field( __u64, address )
__field( __u64, data )
),
TP_fast_assign(
tp_assign(address, address)
tp_assign(data, data)
),
TP_printk("dst %u vec %x (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
(__entry->data & (1<<15)) ? "level" : "edge",
(__entry->address & (1<<3)) ? "|rh" : "")
)
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
TP_ARGS(irqchip, pin),
TP_STRUCT__entry(
__field( unsigned int, irqchip )
__field( unsigned int, pin )
),
TP_fast_assign(
tp_assign(irqchip, irqchip)
tp_assign(pin, pin)
),
TP_printk("irqchip %s pin %u",
__print_symbolic(__entry->irqchip, kvm_irqchips),
__entry->pin)
)
#endif /* defined(__KVM_HAVE_IOAPIC) */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
#define kvm_trace_symbol_mmio \
{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
{ KVM_TRACE_MMIO_READ, "read" }, \
{ KVM_TRACE_MMIO_WRITE, "write" }
TRACE_EVENT(kvm_mmio,
TP_PROTO(int type, int len, u64 gpa, u64 val),
TP_ARGS(type, len, gpa, val),
TP_STRUCT__entry(
__field( u32, type )
__field( u32, len )
__field( u64, gpa )
__field( u64, val )
),
TP_fast_assign(
tp_assign(type, type)
tp_assign(len, len)
tp_assign(gpa, gpa)
tp_assign(val, val)
),
TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
__print_symbolic(__entry->type, kvm_trace_symbol_mmio),
__entry->len, __entry->gpa, __entry->val)
)
#define kvm_fpu_load_symbol \
{0, "unload"}, \
{1, "load"}
TRACE_EVENT(kvm_fpu,
TP_PROTO(int load),
TP_ARGS(load),
TP_STRUCT__entry(
__field( u32, load )
),
TP_fast_assign(
tp_assign(load, load)
),
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
)
TRACE_EVENT(kvm_age_page,
TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
TP_ARGS(hva, slot, ref),
TP_STRUCT__entry(
__field( u64, hva )
__field( u64, gfn )
__field( u8, referenced )
),
TP_fast_assign(
tp_assign(hva, hva)
tp_assign(gfn,
slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT))
tp_assign(referenced, ref)
),
TP_printk("hva %llx gfn %llx %s",
__entry->hva, __entry->gfn,
__entry->referenced ? "YOUNG" : "OLD")
)
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn),
TP_STRUCT__entry(
__field(__u64, gva)
__field(u64, gfn)
),
TP_fast_assign(
tp_assign(gva, gva)
tp_assign(gfn, gfn)
),
TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
)
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
)
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
)
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva),
TP_STRUCT__entry(
__field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
tp_assign(token, token)
tp_assign(gva, gva)
),
TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
)
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
)
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
)
TRACE_EVENT(
kvm_async_pf_completed,
TP_PROTO(unsigned long address, struct page *page, u64 gva),
TP_ARGS(address, page, gva),
TP_STRUCT__entry(
__field(unsigned long, address)
__field(pfn_t, pfn)
__field(u64, gva)
),
TP_fast_assign(
tp_assign(address, address)
tp_assign(pfn, page ? page_to_pfn(page) : 0)
tp_assign(gva, gva)
),
TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
__entry->address, __entry->pfn)
)
#endif
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

@@ -1,34 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM lttng
#if !defined(_TRACE_LTTNG_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_LTTNG_H
#include <linux/tracepoint.h>
TRACE_EVENT(lttng_metadata,
TP_PROTO(const char *str),
TP_ARGS(str),
/*
* Not exactly a string: more a sequence of bytes (dynamic
* array) without the length. This is a dummy anyway: we only
* use this declaration to generate an event metadata entry.
*/
TP_STRUCT__entry(
__string( str, str )
),
TP_fast_assign(
tp_strcpy(str, str)
),
TP_printk("")
)
#endif /* _TRACE_LTTNG_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

@@ -1,400 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/sched.h>
#include <linux/tracepoint.h>
#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_
static inline long __trace_sched_switch_state(struct task_struct *p)
{
long state = p->state;
#ifdef CONFIG_PREEMPT
/*
* For all intents and purposes a preempted task is a running task.
*/
if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
state = TASK_RUNNING;
#endif
return state;
}
#endif /* _TRACE_SCHED_DEF_ */
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
TRACE_EVENT(sched_kthread_stop,
TP_PROTO(struct task_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
),
TP_fast_assign(
tp_memcpy(comm, t->comm, TASK_COMM_LEN)
tp_assign(tid, t->pid)
),
TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
)
/*
* Tracepoint for the return value of the kthread stopping:
*/
TRACE_EVENT(sched_kthread_stop_ret,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
tp_assign(ret, ret)
),
TP_printk("ret=%d", __entry->ret)
)
/*
* Tracepoint for waking up a task:
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
__field( int, success )
__field( int, target_cpu )
),
TP_fast_assign(
tp_memcpy(comm, p->comm, TASK_COMM_LEN)
tp_assign(tid, p->pid)
tp_assign(prio, p->prio)
tp_assign(success, success)
tp_assign(target_cpu, task_cpu(p))
),
TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->tid, __entry->prio,
__entry->success, __entry->target_cpu)
)
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success))
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success))
/*
* Tracepoint for task switches, performed by the scheduler:
*/
TRACE_EVENT(sched_switch,
TP_PROTO(struct task_struct *prev,
struct task_struct *next),
TP_ARGS(prev, next),
TP_STRUCT__entry(
__array_text( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_tid )
__field( int, prev_prio )
__field( long, prev_state )
__array_text( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_tid )
__field( int, next_prio )
),
TP_fast_assign(
tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
tp_assign(prev_tid, prev->pid)
tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
tp_assign(prev_state, __trace_sched_switch_state(prev))
tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
tp_assign(next_tid, next->pid)
tp_assign(next_prio, next->prio - MAX_RT_PRIO)
),
TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
__entry->prev_state ?
__print_flags(__entry->prev_state, "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "W" }) : "R",
__entry->next_comm, __entry->next_tid, __entry->next_prio)
)
/*
* Tracepoint for a task being migrated:
*/
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
TP_fast_assign(
tp_memcpy(comm, p->comm, TASK_COMM_LEN)
tp_assign(tid, p->pid)
tp_assign(prio, p->prio - MAX_RT_PRIO)
tp_assign(orig_cpu, task_cpu(p))
tp_assign(dest_cpu, dest_cpu)
),
TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->tid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
)
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(p),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
),
TP_fast_assign(
tp_memcpy(comm, p->comm, TASK_COMM_LEN)
tp_assign(tid, p->pid)
tp_assign(prio, p->prio - MAX_RT_PRIO)
),
TP_printk("comm=%s tid=%d prio=%d",
__entry->comm, __entry->tid, __entry->prio)
)
/*
* Tracepoint for freeing a task:
*/
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p))
/*
* Tracepoint for a task exiting:
*/
DEFINE_EVENT(sched_process_template, sched_process_exit,
TP_PROTO(struct task_struct *p),
TP_ARGS(p))
/*
* Tracepoint for waiting on task to unschedule:
*/
DEFINE_EVENT(sched_process_template, sched_wait_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p))
/*
* Tracepoint for a waiting task:
*/
TRACE_EVENT(sched_process_wait,
TP_PROTO(struct pid *pid),
TP_ARGS(pid),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, prio )
),
TP_fast_assign(
tp_memcpy(comm, current->comm, TASK_COMM_LEN)
tp_assign(tid, pid_nr(pid))
tp_assign(prio, current->prio - MAX_RT_PRIO)
),
TP_printk("comm=%s tid=%d prio=%d",
__entry->comm, __entry->tid, __entry->prio)
)
/*
* Tracepoint for do_fork:
*/
TRACE_EVENT(sched_process_fork,
TP_PROTO(struct task_struct *parent, struct task_struct *child),
TP_ARGS(parent, child),
TP_STRUCT__entry(
__array_text( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_tid )
__array_text( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_tid )
),
TP_fast_assign(
tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
tp_assign(parent_tid, parent->pid)
tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
tp_assign(child_tid, child->pid)
),
TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
__entry->parent_comm, __entry->parent_tid,
__entry->child_comm, __entry->child_tid)
)
/*
* XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
* adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( u64, delay )
),
TP_fast_assign(
tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
tp_assign(tid, tsk->pid)
tp_assign(delay, delay)
)
TP_perf_assign(
__perf_count(delay)
),
TP_printk("comm=%s tid=%d delay=%Lu [ns]",
__entry->comm, __entry->tid,
(unsigned long long)__entry->delay)
)
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay))
/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay))
/*
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay))
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
TRACE_EVENT(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, runtime, vruntime),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( u64, runtime )
__field( u64, vruntime )
),
TP_fast_assign(
tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
tp_assign(tid, tsk->pid)
tp_assign(runtime, runtime)
tp_assign(vruntime, vruntime)
)
TP_perf_assign(
__perf_count(runtime)
),
TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->tid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
)
/*
* Tracepoint for showing priority inheritance modifying a task's
* priority.
*/
TRACE_EVENT(sched_pi_setprio,
TP_PROTO(struct task_struct *tsk, int newprio),
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
__array_text( char, comm, TASK_COMM_LEN )
__field( pid_t, tid )
__field( int, oldprio )
__field( int, newprio )
),
TP_fast_assign(
tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
tp_assign(tid, tsk->pid)
tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
tp_assign(newprio, newprio - MAX_RT_PRIO)
),
TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
__entry->comm, __entry->tid,
__entry->oldprio, __entry->newprio)
)
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

@@ -1,76 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENTS_SYSCALLS_H
#include <linux/tracepoint.h>
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
#ifndef _TRACE_SYSCALLS_DEF_
#define _TRACE_SYSCALLS_DEF_
#include <asm/ptrace.h>
#include <asm/syscall.h>
#endif /* _TRACE_SYSCALLS_DEF_ */
TRACE_EVENT(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
TP_ARGS(regs, id),
TP_STRUCT__entry(
__field( long, id )
__array( unsigned long, args, 6 )
),
TP_fast_assign(
tp_assign(id, id)
{
tp_memcpy(args,
({
unsigned long args_copy[6];
syscall_get_arguments(current, regs,
0, 6, args_copy);
args_copy;
}), 6 * sizeof(unsigned long));
}
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
__entry->id,
__entry->args[0], __entry->args[1], __entry->args[2],
__entry->args[3], __entry->args[4], __entry->args[5])
)
TRACE_EVENT(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
TP_ARGS(regs, ret),
TP_STRUCT__entry(
__field( long, id )
__field( long, ret )
),
TP_fast_assign(
tp_assign(id, syscall_get_nr(current, regs))
tp_assign(ret, ret)
),
TP_printk("NR %ld = %ld",
__entry->id, __entry->ret)
)
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
#endif /* _TRACE_EVENTS_SYSCALLS_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

@@ -1,569 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block
#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(block_rq_with_error,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( int, errors )
__array( char, rwbs, 6 )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_pos(rq);
__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_sectors(rq);
__entry->errors = rq->errors;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
),
TP_printk("%d,%d %s (%s) %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->errors)
);
/**
* block_rq_abort - abort block operation request
* @q: queue containing the block operation request
* @rq: block IO operation request
*
* Called immediately after pending block IO operation request @rq in
* queue @q is aborted. The fields in the operation request @rq
* can be examined to determine which device and sectors the pending
* operation would access.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_abort,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
* block_rq_requeue - place block IO request back on a queue
* @q: queue holding operation
* @rq: block IO operation request
*
* The block operation request @rq is being placed back into queue
* @q. For some reason the request was not completed and needs to be
* put back in the queue.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
* block_rq_complete - block IO operation completed by device driver
* @q: queue containing the block operation request
* @rq: block operations request
*
* The block_rq_complete tracepoint event indicates that some portion
* of operation request has been completed by the device driver. If
* the @rq->bio is %NULL, then there is absolutely no additional work to
* do for the request. If @rq->bio is non-NULL then there is
* additional work required to complete the request.
*/
DEFINE_EVENT(block_rq_with_error, block_rq_complete,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
DECLARE_EVENT_CLASS(block_rq,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( unsigned int, bytes )
__array( char, rwbs, 6 )
__array( char, comm, TASK_COMM_LEN )
__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
),
TP_fast_assign(
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_pos(rq);
__entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
0 : blk_rq_sectors(rq);
__entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
blk_rq_bytes(rq) : 0;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
blk_dump_cmd(__get_str(cmd), rq);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __entry->bytes, __get_str(cmd),
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
* block_rq_insert - insert block operation request into queue
* @q: target queue
* @rq: block IO operation request
*
* Called immediately before block operation request @rq is inserted
* into queue @q. The fields in the operation request @rq struct can
* be examined to determine which device and sectors the pending
* operation would access.
*/
DEFINE_EVENT(block_rq, block_rq_insert,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
* block_rq_issue - issue pending block IO request operation to device driver
* @q: queue holding operation
* @rq: block IO operation request
*
* Called when block operation request @rq from queue @q is sent to a
* device driver for processing.
*/
DEFINE_EVENT(block_rq, block_rq_issue,
TP_PROTO(struct request_queue *q, struct request *rq),
TP_ARGS(q, rq)
);
/**
* block_bio_bounce - used bounce buffer when processing block operation
* @q: queue holding the block operation
* @bio: block operation
*
* A bounce buffer was used to handle the block operation @bio in @q.
* This occurs when hardware limitations prevent a direct transfer of
* data between the @bio data memory area and the IO device. Use of a
* bounce buffer requires extra copying of data and decreases
* performance.
*/
TRACE_EVENT(block_bio_bounce,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, 6 )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev ?
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
* block_bio_complete - completed all work on the block operation
* @q: queue holding the block operation
* @bio: block operation completed
* @error: io error value
*
* This tracepoint indicates there is no further work to do on this
* block IO operation @bio.
*/
TRACE_EVENT(block_bio_complete,
TP_PROTO(struct request_queue *q, struct bio *bio, int error),
TP_ARGS(q, bio, error),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned, nr_sector )
__field( int, error )
__array( char, rwbs, 6 )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
__entry->error = error;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
),
TP_printk("%d,%d %s %llu + %u [%d]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->error)
);
DECLARE_EVENT_CLASS(block_bio,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, 6 )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
* block_bio_backmerge - merging block operation to the end of an existing operation
* @q: queue holding operation
* @bio: new block operation to merge
*
* Merging block request @bio to the end of an existing block request
* in queue @q.
*/
DEFINE_EVENT(block_bio, block_bio_backmerge,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio)
);
/**
* block_bio_frontmerge - merging block operation to the beginning of an existing operation
* @q: queue holding operation
* @bio: new block operation to merge
*
* Merging block IO operation @bio to the beginning of an existing block
* operation in queue @q.
*/
DEFINE_EVENT(block_bio, block_bio_frontmerge,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio)
);
/**
* block_bio_queue - putting new block IO operation in queue
* @q: queue holding operation
* @bio: new block operation
*
* About to place the block IO operation @bio into queue @q.
*/
DEFINE_EVENT(block_bio, block_bio_queue,
TP_PROTO(struct request_queue *q, struct bio *bio),
TP_ARGS(q, bio)
);
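A sketch of the class/instance split used above, not part of the removed file: DECLARE_EVENT_CLASS(block_bio) defines the record layout, assignment and print format once, and each DEFINE_EVENT() reuses it while supplying only the event name. A hypothetical extra event built on the same class would read:
/* Hypothetical event name, shown only to illustrate how DEFINE_EVENT()
 * reuses the block_bio class declared above. */
DEFINE_EVENT(block_bio, block_bio_example,
	TP_PROTO(struct request_queue *q, struct bio *bio),
	TP_ARGS(q, bio)
);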
DECLARE_EVENT_CLASS(block_get_rq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__array( char, rwbs, 6 )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
__entry->sector = bio ? bio->bi_sector : 0;
__entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
blk_fill_rwbs(__entry->rwbs,
bio ? bio->bi_rw : 0, __entry->nr_sector);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu + %u [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector, __entry->comm)
);
/**
* block_getrq - get a free request entry in queue for block IO operations
* @q: queue for operations
* @bio: pending block IO operation
* @rw: low bit indicates a read (%0) or a write (%1)
*
* A request struct for queue @q has been allocated to handle the
* block IO operation @bio.
*/
DEFINE_EVENT(block_get_rq, block_getrq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw)
);
/**
* block_sleeprq - waiting to get a free request entry in queue for block IO operation
* @q: queue for operation
* @bio: pending block IO operation
* @rw: low bit indicates a read (%0) or a write (%1)
*
* In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become available.
*/
DEFINE_EVENT(block_get_rq, block_sleeprq,
TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
TP_ARGS(q, bio, rw)
);
/**
 * block_plug - keep operation requests in the request queue
* @q: request queue to plug
*
* Plug the request queue @q. Do not allow block operation requests
* to be sent to the device driver. Instead, accumulate requests in
* the queue to improve throughput performance of the block device.
*/
TRACE_EVENT(block_plug,
TP_PROTO(struct request_queue *q),
TP_ARGS(q),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("[%s]", __entry->comm)
);
DECLARE_EVENT_CLASS(block_unplug,
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
TP_ARGS(q, depth, explicit),
TP_STRUCT__entry(
__field( int, nr_rq )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->nr_rq = depth;
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
);
/**
 * block_unplug - release of operation requests in the request queue
* @q: request queue to unplug
* @depth: number of requests just added to the queue
* @explicit: whether this was an explicit unplug, or one from schedule()
*
 * Unplug request queue @q because the device driver is scheduled to work
* on elements in the request queue.
*/
DEFINE_EVENT(block_unplug, block_unplug,
TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
TP_ARGS(q, depth, explicit)
);
/**
* block_split - split a single bio struct into two bio structs
* @q: queue containing the bio
* @bio: block operation being split
* @new_sector: The starting sector for the new bio
*
* The bio request @bio in request queue @q needs to be split into two
* bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as operations crossing device boundaries in a RAID system.
*/
TRACE_EVENT(block_split,
TP_PROTO(struct request_queue *q, struct bio *bio,
unsigned int new_sector),
TP_ARGS(q, bio, new_sector),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( sector_t, new_sector )
__array( char, rwbs, 6 )
__array( char, comm, TASK_COMM_LEN )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
__entry->new_sector = new_sector;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
TP_printk("%d,%d %s %llu / %llu [%s]",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
(unsigned long long)__entry->new_sector,
__entry->comm)
);
/**
* block_bio_remap - map request for a logical device to the raw device
* @q: queue holding the operation
* @bio: revised operation
* @dev: device for the operation
* @from: original sector for the operation
*
* An operation for a logical device has been mapped to the
* raw block device.
*/
TRACE_EVENT(block_bio_remap,
TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
sector_t from),
TP_ARGS(q, bio, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
__array( char, rwbs, 6 )
),
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
__entry->old_dev = dev;
__entry->old_sector = from;
blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
(unsigned long long)__entry->old_sector)
);
/**
* block_rq_remap - map request for a block operation request
* @q: queue holding the operation
* @rq: block IO operation request
* @dev: device for the operation
* @from: original sector for the operation
*
* The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
* the original sector.
*/
TRACE_EVENT(block_rq_remap,
TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
sector_t from),
TP_ARGS(q, rq, dev, from),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
__field( dev_t, old_dev )
__field( sector_t, old_sector )
__array( char, rwbs, 6 )
),
TP_fast_assign(
__entry->dev = disk_devt(rq->rq_disk);
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
__entry->old_sector = from;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
),
TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
(unsigned long long)__entry->sector,
__entry->nr_sector,
MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
(unsigned long long)__entry->old_sector)
);
#endif /* _TRACE_BLOCK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -1,150 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM irq
#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IRQ_H
#include <linux/tracepoint.h>
struct irqaction;
struct softirq_action;
#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \
__print_symbolic(val, \
softirq_name(HI), \
softirq_name(TIMER), \
softirq_name(NET_TX), \
softirq_name(NET_RX), \
softirq_name(BLOCK), \
softirq_name(BLOCK_IOPOLL), \
softirq_name(TASKLET), \
softirq_name(SCHED), \
softirq_name(HRTIMER), \
softirq_name(RCU))
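For reference, a worked expansion of the helper macros above (not part of the removed file):
/*
 * softirq_name(TIMER) expands to { TIMER_SOFTIRQ, "TIMER" }, so
 * show_softirq_name(__entry->vec) lets __print_symbolic() render the
 * numeric softirq vector stored in the event as its name when the
 * trace is formatted.
 */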
/**
* irq_handler_entry - called immediately before the irq action handler
* @irq: irq number
* @action: pointer to struct irqaction
*
* The struct irqaction pointed to by @action contains various
* information about the handler, including the device name,
* @action->name, and the device id, @action->dev_id. When used in
* conjunction with the irq_handler_exit tracepoint, we can figure
* out irq handler latencies.
*/
TRACE_EVENT(irq_handler_entry,
TP_PROTO(int irq, struct irqaction *action),
TP_ARGS(irq, action),
TP_STRUCT__entry(
__field( int, irq )
__string( name, action->name )
),
TP_fast_assign(
__entry->irq = irq;
__assign_str(name, action->name);
),
TP_printk("irq=%d name=%s", __entry->irq, __get_str(name))
);
/**
* irq_handler_exit - called immediately after the irq action handler returns
* @irq: irq number
* @action: pointer to struct irqaction
* @ret: return value
*
* If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
 * @action->handler successfully handled this irq. Otherwise, the irq might be
* a shared irq line, or the irq was not handled successfully. Can be used in
* conjunction with the irq_handler_entry to understand irq handler latencies.
*/
TRACE_EVENT(irq_handler_exit,
TP_PROTO(int irq, struct irqaction *action, int ret),
TP_ARGS(irq, action, ret),
TP_STRUCT__entry(
__field( int, irq )
__field( int, ret )
),
TP_fast_assign(
__entry->irq = irq;
__entry->ret = ret;
),
TP_printk("irq=%d ret=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
);
DECLARE_EVENT_CLASS(softirq,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr),
TP_STRUCT__entry(
__field( unsigned int, vec )
),
TP_fast_assign(
__entry->vec = vec_nr;
),
TP_printk("vec=%u [action=%s]", __entry->vec,
show_softirq_name(__entry->vec))
);
/**
* softirq_entry - called immediately before the softirq handler
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_exit tracepoint
 * we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_entry,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/**
* softirq_exit - called immediately after the softirq handler returns
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
 * we can determine the softirq handler runtime.
*/
DEFINE_EVENT(softirq, softirq_exit,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
/**
* softirq_raise - called immediately when a softirq is raised
* @vec_nr: softirq vector number
*
* When used in combination with the softirq_entry tracepoint
 * we can determine the latency from when a softirq is raised to when it runs.
*/
DEFINE_EVENT(softirq, softirq_raise,
TP_PROTO(unsigned int vec_nr),
TP_ARGS(vec_nr)
);
#endif /* _TRACE_IRQ_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -1,312 +0,0 @@
#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
#define kvm_trace_exit_reason \
ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
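For reference, a worked expansion of the macro above (not part of the removed file):
/*
 * ERSN(HLT) expands to { KVM_EXIT_HLT, "KVM_EXIT_HLT" }, so
 * __print_symbolic() in kvm_userspace_exit below can render the numeric
 * exit reason as its KVM_EXIT_* name.
 */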
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
TP_ARGS(reason, errno),
TP_STRUCT__entry(
__field( __u32, reason )
__field( int, errno )
),
TP_fast_assign(
__entry->reason = reason;
__entry->errno = errno;
),
TP_printk("reason %s (%d)",
__entry->errno < 0 ?
(__entry->errno == -EINTR ? "restart" : "error") :
__print_symbolic(__entry->reason, kvm_trace_exit_reason),
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
TP_STRUCT__entry(
__field( unsigned int, gsi )
__field( int, level )
__field( int, irq_source_id )
),
TP_fast_assign(
__entry->gsi = gsi;
__entry->level = level;
__entry->irq_source_id = irq_source_id;
),
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
#define kvm_deliver_mode \
{0x0, "Fixed"}, \
{0x1, "LowPrio"}, \
{0x2, "SMI"}, \
{0x3, "Res3"}, \
{0x4, "NMI"}, \
{0x5, "INIT"}, \
{0x6, "SIPI"}, \
{0x7, "ExtINT"}
TRACE_EVENT(kvm_ioapic_set_irq,
TP_PROTO(__u64 e, int pin, bool coalesced),
TP_ARGS(e, pin, coalesced),
TP_STRUCT__entry(
__field( __u64, e )
__field( int, pin )
__field( bool, coalesced )
),
TP_fast_assign(
__entry->e = e;
__entry->pin = pin;
__entry->coalesced = coalesced;
),
TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
(__entry->e & (1<<15)) ? "level" : "edge",
(__entry->e & (1<<16)) ? "|masked" : "",
__entry->coalesced ? " (coalesced)" : "")
);
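A self-contained userspace sketch of the same bit extraction the TP_printk above performs on the 64-bit IOAPIC redirection entry @e (not part of the removed file; the sample value and the simplified formatting are made up):
#include <stdio.h>
#include <stdint.h>

/* Same delivery-mode names as the kvm_deliver_mode table above. */
static const char * const deliver_mode[8] = {
	"Fixed", "LowPrio", "SMI", "Res3", "NMI", "INIT", "SIPI", "ExtINT"
};

int main(void)
{
	uint64_t e = 0x0100000000008830ULL;	/* made-up redirection entry */

	printf("dst %x vec=%u (%s|%s|%s%s)\n",
	       (unsigned)((e >> 56) & 0xff),	/* destination field */
	       (unsigned)(e & 0xff),		/* interrupt vector  */
	       deliver_mode[(e >> 8) & 0x7],
	       (e & (1ULL << 11)) ? "logical" : "physical",
	       (e & (1ULL << 15)) ? "level" : "edge",
	       (e & (1ULL << 16)) ? "|masked" : "");
	return 0;
}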
TRACE_EVENT(kvm_msi_set_irq,
TP_PROTO(__u64 address, __u64 data),
TP_ARGS(address, data),
TP_STRUCT__entry(
__field( __u64, address )
__field( __u64, data )
),
TP_fast_assign(
__entry->address = address;
__entry->data = data;
),
TP_printk("dst %u vec %x (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
(__entry->data & (1<<15)) ? "level" : "edge",
(__entry->address & (1<<3)) ? "|rh" : "")
);
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
TP_ARGS(irqchip, pin),
TP_STRUCT__entry(
__field( unsigned int, irqchip )
__field( unsigned int, pin )
),
TP_fast_assign(
__entry->irqchip = irqchip;
__entry->pin = pin;
),
TP_printk("irqchip %s pin %u",
__print_symbolic(__entry->irqchip, kvm_irqchips),
__entry->pin)
);
#endif /* defined(__KVM_HAVE_IOAPIC) */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
#define kvm_trace_symbol_mmio \
{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
{ KVM_TRACE_MMIO_READ, "read" }, \
{ KVM_TRACE_MMIO_WRITE, "write" }
TRACE_EVENT(kvm_mmio,
TP_PROTO(int type, int len, u64 gpa, u64 val),
TP_ARGS(type, len, gpa, val),
TP_STRUCT__entry(
__field( u32, type )
__field( u32, len )
__field( u64, gpa )
__field( u64, val )
),
TP_fast_assign(
__entry->type = type;
__entry->len = len;
__entry->gpa = gpa;
__entry->val = val;
),
TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
__print_symbolic(__entry->type, kvm_trace_symbol_mmio),
__entry->len, __entry->gpa, __entry->val)
);
#define kvm_fpu_load_symbol \
{0, "unload"}, \
{1, "load"}
TRACE_EVENT(kvm_fpu,
TP_PROTO(int load),
TP_ARGS(load),
TP_STRUCT__entry(
__field( u32, load )
),
TP_fast_assign(
__entry->load = load;
),
TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
TRACE_EVENT(kvm_age_page,
TP_PROTO(ulong hva, struct kvm_memory_slot *slot, int ref),
TP_ARGS(hva, slot, ref),
TP_STRUCT__entry(
__field( u64, hva )
__field( u64, gfn )
__field( u8, referenced )
),
TP_fast_assign(
__entry->hva = hva;
__entry->gfn =
slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
__entry->referenced = ref;
),
TP_printk("hva %llx gfn %llx %s",
__entry->hva, __entry->gfn,
__entry->referenced ? "YOUNG" : "OLD")
);
#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn),
TP_STRUCT__entry(
__field(__u64, gva)
__field(u64, gfn)
),
TP_fast_assign(
__entry->gva = gva;
__entry->gfn = gfn;
),
TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
);
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
TP_PROTO(u64 gva, u64 gfn),
TP_ARGS(gva, gfn)
);
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva),
TP_STRUCT__entry(
__field(__u64, token)
__field(__u64, gva)
),
TP_fast_assign(
__entry->token = token;
__entry->gva = gva;
),
TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
);
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
TP_PROTO(u64 token, u64 gva),
TP_ARGS(token, gva)
);
TRACE_EVENT(
kvm_async_pf_completed,
TP_PROTO(unsigned long address, struct page *page, u64 gva),
TP_ARGS(address, page, gva),
TP_STRUCT__entry(
__field(unsigned long, address)
__field(pfn_t, pfn)
__field(u64, gva)
),
TP_fast_assign(
__entry->address = address;
__entry->pfn = page ? page_to_pfn(page) : 0;
__entry->gva = gva;
),
TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
__entry->address, __entry->pfn)
);
#endif
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -1,397 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H
#include <linux/sched.h>
#include <linux/tracepoint.h>
/*
* Tracepoint for calling kthread_stop, performed to end a kthread:
*/
TRACE_EVENT(sched_kthread_stop,
TP_PROTO(struct task_struct *t),
TP_ARGS(t),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
),
TP_fast_assign(
memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
__entry->pid = t->pid;
),
TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
/*
* Tracepoint for the return value of the kthread stopping:
*/
TRACE_EVENT(sched_kthread_stop_ret,
TP_PROTO(int ret),
TP_ARGS(ret),
TP_STRUCT__entry(
__field( int, ret )
),
TP_fast_assign(
__entry->ret = ret;
),
TP_printk("ret=%d", __entry->ret)
);
/*
* Tracepoint for waking up a task:
*/
DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, success )
__field( int, target_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success, __entry->target_cpu)
);
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success));
/*
* Tracepoint for waking up a new task:
*/
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
TP_PROTO(struct task_struct *p, int success),
TP_ARGS(p, success));
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
long state = p->state;
#ifdef CONFIG_PREEMPT
/*
* For all intents and purposes a preempted task is a running task.
*/
if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
state = TASK_RUNNING;
#endif
return state;
}
#endif
/*
* Tracepoint for task switches, performed by the scheduler:
*/
TRACE_EVENT(sched_switch,
TP_PROTO(struct task_struct *prev,
struct task_struct *next),
TP_ARGS(prev, next),
TP_STRUCT__entry(
__array( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_pid )
__field( int, prev_prio )
__field( long, prev_state )
__array( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_pid )
__field( int, next_prio )
),
TP_fast_assign(
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
__entry->prev_state = __trace_sched_switch_state(prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
),
TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->prev_state ?
__print_flags(__entry->prev_state, "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "W" }) : "R",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
/*
* Tracepoint for a task being migrated:
*/
TRACE_EVENT(sched_migrate_task,
TP_PROTO(struct task_struct *p, int dest_cpu),
TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
__field( int, orig_cpu )
__field( int, dest_cpu )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);
DECLARE_EVENT_CLASS(sched_process_template,
TP_PROTO(struct task_struct *p),
TP_ARGS(p),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for freeing a task:
*/
DEFINE_EVENT(sched_process_template, sched_process_free,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a task exiting:
*/
DEFINE_EVENT(sched_process_template, sched_process_exit,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for waiting on task to unschedule:
*/
DEFINE_EVENT(sched_process_template, sched_wait_task,
TP_PROTO(struct task_struct *p),
TP_ARGS(p));
/*
* Tracepoint for a waiting task:
*/
TRACE_EVENT(sched_process_wait,
TP_PROTO(struct pid *pid),
TP_ARGS(pid),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, prio )
),
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio;
),
TP_printk("comm=%s pid=%d prio=%d",
__entry->comm, __entry->pid, __entry->prio)
);
/*
* Tracepoint for do_fork:
*/
TRACE_EVENT(sched_process_fork,
TP_PROTO(struct task_struct *parent, struct task_struct *child),
TP_ARGS(parent, child),
TP_STRUCT__entry(
__array( char, parent_comm, TASK_COMM_LEN )
__field( pid_t, parent_pid )
__array( char, child_comm, TASK_COMM_LEN )
__field( pid_t, child_pid )
),
TP_fast_assign(
memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
__entry->parent_pid = parent->pid;
memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
__entry->child_pid = child->pid;
),
TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid)
);
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
*/
DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, delay )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->delay = delay;
)
TP_perf_assign(
__perf_count(delay);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->delay)
);
/*
* Tracepoint for accounting wait time (time the task is runnable
* but not actually running due to scheduler contention).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting sleep time (time the task is not runnable,
* including iowait, see below).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting iowait time (time the task is not runnable
* due to waiting on IO to complete).
*/
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
TP_PROTO(struct task_struct *tsk, u64 delay),
TP_ARGS(tsk, delay));
/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
TRACE_EVENT(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
TP_ARGS(tsk, runtime, vruntime),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( u64, runtime )
__field( u64, vruntime )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->runtime = runtime;
__entry->vruntime = vruntime;
)
TP_perf_assign(
__perf_count(runtime);
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
__entry->comm, __entry->pid,
(unsigned long long)__entry->runtime,
(unsigned long long)__entry->vruntime)
);
/*
 * Tracepoint for showing priority inheritance modifying a task's
* priority.
*/
TRACE_EVENT(sched_pi_setprio,
TP_PROTO(struct task_struct *tsk, int newprio),
TP_ARGS(tsk, newprio),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
__field( int, oldprio )
__field( int, newprio )
),
TP_fast_assign(
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
__entry->newprio = newprio;
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
__entry->comm, __entry->pid,
__entry->oldprio, __entry->newprio)
);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -1,75 +0,0 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM raw_syscalls
#define TRACE_INCLUDE_FILE syscalls
#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EVENTS_SYSCALLS_H
#include <linux/tracepoint.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
extern void syscall_regfunc(void);
extern void syscall_unregfunc(void);
TRACE_EVENT_FN(sys_enter,
TP_PROTO(struct pt_regs *regs, long id),
TP_ARGS(regs, id),
TP_STRUCT__entry(
__field( long, id )
__array( unsigned long, args, 6 )
),
TP_fast_assign(
__entry->id = id;
syscall_get_arguments(current, regs, 0, 6, __entry->args);
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
__entry->id,
__entry->args[0], __entry->args[1], __entry->args[2],
__entry->args[3], __entry->args[4], __entry->args[5]),
syscall_regfunc, syscall_unregfunc
);
TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
TRACE_EVENT_FN(sys_exit,
TP_PROTO(struct pt_regs *regs, long ret),
TP_ARGS(regs, ret),
TP_STRUCT__entry(
__field( long, id )
__field( long, ret )
),
TP_fast_assign(
__entry->id = syscall_get_nr(current, regs);
__entry->ret = ret;
),
TP_printk("NR %ld = %ld",
__entry->id, __entry->ret),
syscall_regfunc, syscall_unregfunc
);
TRACE_EVENT_FLAGS(sys_exit, TRACE_EVENT_FL_CAP_ANY)
#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
#endif /* _TRACE_EVENTS_SYSCALLS_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View file

@ -1,263 +0,0 @@
syscall sys_read nr 0 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
syscall sys_write nr 1 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
syscall sys_open nr 2 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
syscall sys_close nr 3 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_newstat nr 4 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
syscall sys_newfstat nr 5 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
syscall sys_newlstat nr 6 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
syscall sys_poll nr 7 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
syscall sys_lseek nr 8 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
syscall sys_mmap nr 9 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, off)
syscall sys_mprotect nr 10 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
syscall sys_munmap nr 11 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
syscall sys_brk nr 12 nbargs 1 types: (unsigned long) args: (brk)
syscall sys_rt_sigaction nr 13 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
syscall sys_rt_sigprocmask nr 14 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
syscall sys_ioctl nr 16 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
syscall sys_readv nr 19 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
syscall sys_writev nr 20 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
syscall sys_access nr 21 nbargs 2 types: (const char *, int) args: (filename, mode)
syscall sys_pipe nr 22 nbargs 1 types: (int *) args: (fildes)
syscall sys_select nr 23 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
syscall sys_sched_yield nr 24 nbargs 0 types: () args: ()
syscall sys_mremap nr 25 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
syscall sys_msync nr 26 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
syscall sys_mincore nr 27 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
syscall sys_madvise nr 28 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
syscall sys_shmget nr 29 nbargs 3 types: (key_t, size_t, int) args: (key, size, shmflg)
syscall sys_shmat nr 30 nbargs 3 types: (int, char *, int) args: (shmid, shmaddr, shmflg)
syscall sys_shmctl nr 31 nbargs 3 types: (int, int, struct shmid_ds *) args: (shmid, cmd, buf)
syscall sys_dup nr 32 nbargs 1 types: (unsigned int) args: (fildes)
syscall sys_dup2 nr 33 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
syscall sys_pause nr 34 nbargs 0 types: () args: ()
syscall sys_nanosleep nr 35 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
syscall sys_getitimer nr 36 nbargs 2 types: (int, struct itimerval *) args: (which, value)
syscall sys_alarm nr 37 nbargs 1 types: (unsigned int) args: (seconds)
syscall sys_setitimer nr 38 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
syscall sys_getpid nr 39 nbargs 0 types: () args: ()
syscall sys_sendfile64 nr 40 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
syscall sys_socket nr 41 nbargs 3 types: (int, int, int) args: (family, type, protocol)
syscall sys_connect nr 42 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, uservaddr, addrlen)
syscall sys_accept nr 43 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, upeer_sockaddr, upeer_addrlen)
syscall sys_sendto nr 44 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int) args: (fd, buff, len, flags, addr, addr_len)
syscall sys_recvfrom nr 45 nbargs 6 types: (int, void *, size_t, unsigned, struct sockaddr *, int *) args: (fd, ubuf, size, flags, addr, addr_len)
syscall sys_sendmsg nr 46 nbargs 3 types: (int, struct msghdr *, unsigned) args: (fd, msg, flags)
syscall sys_recvmsg nr 47 nbargs 3 types: (int, struct msghdr *, unsigned int) args: (fd, msg, flags)
syscall sys_shutdown nr 48 nbargs 2 types: (int, int) args: (fd, how)
syscall sys_bind nr 49 nbargs 3 types: (int, struct sockaddr *, int) args: (fd, umyaddr, addrlen)
syscall sys_listen nr 50 nbargs 2 types: (int, int) args: (fd, backlog)
syscall sys_getsockname nr 51 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
syscall sys_getpeername nr 52 nbargs 3 types: (int, struct sockaddr *, int *) args: (fd, usockaddr, usockaddr_len)
syscall sys_socketpair nr 53 nbargs 4 types: (int, int, int, int *) args: (family, type, protocol, usockvec)
syscall sys_setsockopt nr 54 nbargs 5 types: (int, int, int, char *, int) args: (fd, level, optname, optval, optlen)
syscall sys_getsockopt nr 55 nbargs 5 types: (int, int, int, char *, int *) args: (fd, level, optname, optval, optlen)
syscall sys_exit nr 60 nbargs 1 types: (int) args: (error_code)
syscall sys_wait4 nr 61 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
syscall sys_kill nr 62 nbargs 2 types: (pid_t, int) args: (pid, sig)
syscall sys_newuname nr 63 nbargs 1 types: (struct new_utsname *) args: (name)
syscall sys_semget nr 64 nbargs 3 types: (key_t, int, int) args: (key, nsems, semflg)
syscall sys_semop nr 65 nbargs 3 types: (int, struct sembuf *, unsigned) args: (semid, tsops, nsops)
syscall sys_shmdt nr 67 nbargs 1 types: (char *) args: (shmaddr)
syscall sys_msgget nr 68 nbargs 2 types: (key_t, int) args: (key, msgflg)
syscall sys_msgsnd nr 69 nbargs 4 types: (int, struct msgbuf *, size_t, int) args: (msqid, msgp, msgsz, msgflg)
syscall sys_msgrcv nr 70 nbargs 5 types: (int, struct msgbuf *, size_t, long, int) args: (msqid, msgp, msgsz, msgtyp, msgflg)
syscall sys_msgctl nr 71 nbargs 3 types: (int, int, struct msqid_ds *) args: (msqid, cmd, buf)
syscall sys_fcntl nr 72 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
syscall sys_flock nr 73 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
syscall sys_fsync nr 74 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_fdatasync nr 75 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_truncate nr 76 nbargs 2 types: (const char *, long) args: (path, length)
syscall sys_ftruncate nr 77 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
syscall sys_getdents nr 78 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
syscall sys_getcwd nr 79 nbargs 2 types: (char *, unsigned long) args: (buf, size)
syscall sys_chdir nr 80 nbargs 1 types: (const char *) args: (filename)
syscall sys_fchdir nr 81 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_rename nr 82 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
syscall sys_mkdir nr 83 nbargs 2 types: (const char *, int) args: (pathname, mode)
syscall sys_rmdir nr 84 nbargs 1 types: (const char *) args: (pathname)
syscall sys_creat nr 85 nbargs 2 types: (const char *, int) args: (pathname, mode)
syscall sys_link nr 86 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
syscall sys_unlink nr 87 nbargs 1 types: (const char *) args: (pathname)
syscall sys_symlink nr 88 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
syscall sys_readlink nr 89 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
syscall sys_chmod nr 90 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
syscall sys_fchmod nr 91 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
syscall sys_chown nr 92 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
syscall sys_fchown nr 93 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
syscall sys_lchown nr 94 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
syscall sys_umask nr 95 nbargs 1 types: (int) args: (mask)
syscall sys_gettimeofday nr 96 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
syscall sys_getrlimit nr 97 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
syscall sys_getrusage nr 98 nbargs 2 types: (int, struct rusage *) args: (who, ru)
syscall sys_sysinfo nr 99 nbargs 1 types: (struct sysinfo *) args: (info)
syscall sys_times nr 100 nbargs 1 types: (struct tms *) args: (tbuf)
syscall sys_ptrace nr 101 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
syscall sys_getuid nr 102 nbargs 0 types: () args: ()
syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
syscall sys_getgid nr 104 nbargs 0 types: () args: ()
syscall sys_setuid nr 105 nbargs 1 types: (uid_t) args: (uid)
syscall sys_setgid nr 106 nbargs 1 types: (gid_t) args: (gid)
syscall sys_geteuid nr 107 nbargs 0 types: () args: ()
syscall sys_getegid nr 108 nbargs 0 types: () args: ()
syscall sys_setpgid nr 109 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
syscall sys_getppid nr 110 nbargs 0 types: () args: ()
syscall sys_getpgrp nr 111 nbargs 0 types: () args: ()
syscall sys_setsid nr 112 nbargs 0 types: () args: ()
syscall sys_setreuid nr 113 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
syscall sys_setregid nr 114 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
syscall sys_getgroups nr 115 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
syscall sys_setgroups nr 116 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
syscall sys_setresuid nr 117 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
syscall sys_getresuid nr 118 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
syscall sys_setresgid nr 119 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
syscall sys_getresgid nr 120 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
syscall sys_getpgid nr 121 nbargs 1 types: (pid_t) args: (pid)
syscall sys_setfsuid nr 122 nbargs 1 types: (uid_t) args: (uid)
syscall sys_setfsgid nr 123 nbargs 1 types: (gid_t) args: (gid)
syscall sys_getsid nr 124 nbargs 1 types: (pid_t) args: (pid)
syscall sys_capget nr 125 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
syscall sys_capset nr 126 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
syscall sys_rt_sigpending nr 127 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
syscall sys_rt_sigtimedwait nr 128 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
syscall sys_rt_sigqueueinfo nr 129 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
syscall sys_rt_sigsuspend nr 130 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
syscall sys_utime nr 132 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
syscall sys_mknod nr 133 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
syscall sys_personality nr 135 nbargs 1 types: (unsigned int) args: (personality)
syscall sys_ustat nr 136 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
syscall sys_statfs nr 137 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
syscall sys_fstatfs nr 138 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
syscall sys_sysfs nr 139 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
syscall sys_getpriority nr 140 nbargs 2 types: (int, int) args: (which, who)
syscall sys_setpriority nr 141 nbargs 3 types: (int, int, int) args: (which, who, niceval)
syscall sys_sched_setparam nr 142 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
syscall sys_sched_getparam nr 143 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
syscall sys_sched_setscheduler nr 144 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
syscall sys_sched_getscheduler nr 145 nbargs 1 types: (pid_t) args: (pid)
syscall sys_sched_get_priority_max nr 146 nbargs 1 types: (int) args: (policy)
syscall sys_sched_get_priority_min nr 147 nbargs 1 types: (int) args: (policy)
syscall sys_sched_rr_get_interval nr 148 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
syscall sys_mlock nr 149 nbargs 2 types: (unsigned long, size_t) args: (start, len)
syscall sys_munlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
syscall sys_mlockall nr 151 nbargs 1 types: (int) args: (flags)
syscall sys_munlockall nr 152 nbargs 0 types: () args: ()
syscall sys_vhangup nr 153 nbargs 0 types: () args: ()
syscall sys_pivot_root nr 155 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
syscall sys_sysctl nr 156 nbargs 1 types: (struct __sysctl_args *) args: (args)
syscall sys_prctl nr 157 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
syscall sys_adjtimex nr 159 nbargs 1 types: (struct timex *) args: (txc_p)
syscall sys_setrlimit nr 160 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
syscall sys_chroot nr 161 nbargs 1 types: (const char *) args: (filename)
syscall sys_sync nr 162 nbargs 0 types: () args: ()
syscall sys_settimeofday nr 164 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
syscall sys_mount nr 165 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
syscall sys_umount nr 166 nbargs 2 types: (char *, int) args: (name, flags)
syscall sys_swapon nr 167 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
syscall sys_swapoff nr 168 nbargs 1 types: (const char *) args: (specialfile)
syscall sys_reboot nr 169 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
syscall sys_sethostname nr 170 nbargs 2 types: (char *, int) args: (name, len)
syscall sys_setdomainname nr 171 nbargs 2 types: (char *, int) args: (name, len)
syscall sys_init_module nr 175 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
syscall sys_delete_module nr 176 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
syscall sys_nfsservctl nr 180 nbargs 3 types: (int, struct nfsctl_arg *, void *) args: (cmd, arg, res)
syscall sys_gettid nr 186 nbargs 0 types: () args: ()
syscall sys_setxattr nr 188 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
syscall sys_lsetxattr nr 189 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
syscall sys_fsetxattr nr 190 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
syscall sys_getxattr nr 191 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
syscall sys_lgetxattr nr 192 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
syscall sys_fgetxattr nr 193 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
syscall sys_listxattr nr 194 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
syscall sys_llistxattr nr 195 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
syscall sys_flistxattr nr 196 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
syscall sys_removexattr nr 197 nbargs 2 types: (const char *, const char *) args: (pathname, name)
syscall sys_lremovexattr nr 198 nbargs 2 types: (const char *, const char *) args: (pathname, name)
syscall sys_fremovexattr nr 199 nbargs 2 types: (int, const char *) args: (fd, name)
syscall sys_tkill nr 200 nbargs 2 types: (pid_t, int) args: (pid, sig)
syscall sys_time nr 201 nbargs 1 types: (time_t *) args: (tloc)
syscall sys_futex nr 202 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
syscall sys_sched_setaffinity nr 203 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
syscall sys_sched_getaffinity nr 204 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
syscall sys_io_setup nr 206 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
syscall sys_io_destroy nr 207 nbargs 1 types: (aio_context_t) args: (ctx)
syscall sys_io_getevents nr 208 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
syscall sys_io_submit nr 209 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
syscall sys_io_cancel nr 210 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
syscall sys_epoll_create nr 213 nbargs 1 types: (int) args: (size)
syscall sys_remap_file_pages nr 216 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
syscall sys_getdents64 nr 217 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
syscall sys_set_tid_address nr 218 nbargs 1 types: (int *) args: (tidptr)
syscall sys_restart_syscall nr 219 nbargs 0 types: () args: ()
syscall sys_semtimedop nr 220 nbargs 4 types: (int, struct sembuf *, unsigned, const struct timespec *) args: (semid, tsops, nsops, timeout)
syscall sys_timer_create nr 222 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
syscall sys_timer_settime nr 223 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
syscall sys_timer_gettime nr 224 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
syscall sys_timer_getoverrun nr 225 nbargs 1 types: (timer_t) args: (timer_id)
syscall sys_timer_delete nr 226 nbargs 1 types: (timer_t) args: (timer_id)
syscall sys_clock_settime nr 227 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
syscall sys_clock_gettime nr 228 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
syscall sys_clock_getres nr 229 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
syscall sys_clock_nanosleep nr 230 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
syscall sys_exit_group nr 231 nbargs 1 types: (int) args: (error_code)
syscall sys_epoll_wait nr 232 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
syscall sys_epoll_ctl nr 233 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
syscall sys_tgkill nr 234 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
syscall sys_utimes nr 235 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
syscall sys_mq_open nr 240 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
syscall sys_mq_unlink nr 241 nbargs 1 types: (const char *) args: (u_name)
syscall sys_mq_timedsend nr 242 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
syscall sys_mq_timedreceive nr 243 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
syscall sys_mq_notify nr 244 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
syscall sys_mq_getsetattr nr 245 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
syscall sys_kexec_load nr 246 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
syscall sys_waitid nr 247 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
syscall sys_ioprio_set nr 251 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
syscall sys_ioprio_get nr 252 nbargs 2 types: (int, int) args: (which, who)
syscall sys_inotify_init nr 253 nbargs 0 types: () args: ()
syscall sys_inotify_add_watch nr 254 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
syscall sys_inotify_rm_watch nr 255 nbargs 2 types: (int, __s32) args: (fd, wd)
syscall sys_openat nr 257 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
syscall sys_mkdirat nr 258 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
syscall sys_mknodat nr 259 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
syscall sys_fchownat nr 260 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
syscall sys_futimesat nr 261 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
syscall sys_newfstatat nr 262 nbargs 4 types: (int, const char *, struct stat *, int) args: (dfd, filename, statbuf, flag)
syscall sys_unlinkat nr 263 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
syscall sys_renameat nr 264 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
syscall sys_linkat nr 265 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
syscall sys_symlinkat nr 266 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
syscall sys_readlinkat nr 267 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
syscall sys_fchmodat nr 268 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
syscall sys_faccessat nr 269 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
syscall sys_pselect6 nr 270 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
syscall sys_ppoll nr 271 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
syscall sys_unshare nr 272 nbargs 1 types: (unsigned long) args: (unshare_flags)
syscall sys_set_robust_list nr 273 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
syscall sys_get_robust_list nr 274 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
syscall sys_splice nr 275 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
syscall sys_tee nr 276 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
syscall sys_vmsplice nr 278 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
syscall sys_utimensat nr 280 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
syscall sys_epoll_pwait nr 281 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
syscall sys_signalfd nr 282 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
syscall sys_timerfd_create nr 283 nbargs 2 types: (int, int) args: (clockid, flags)
syscall sys_eventfd nr 284 nbargs 1 types: (unsigned int) args: (count)
syscall sys_timerfd_settime nr 286 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
syscall sys_timerfd_gettime nr 287 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
syscall sys_accept4 nr 288 nbargs 4 types: (int, struct sockaddr *, int *, int) args: (fd, upeer_sockaddr, upeer_addrlen, flags)
syscall sys_signalfd4 nr 289 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
syscall sys_eventfd2 nr 290 nbargs 2 types: (unsigned int, int) args: (count, flags)
syscall sys_epoll_create1 nr 291 nbargs 1 types: (int) args: (flags)
syscall sys_dup3 nr 292 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
syscall sys_pipe2 nr 293 nbargs 2 types: (int *, int) args: (fildes, flags)
syscall sys_inotify_init1 nr 294 nbargs 1 types: (int) args: (flags)
syscall sys_preadv nr 295 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
syscall sys_pwritev nr 296 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
syscall sys_rt_tgsigqueueinfo nr 297 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
syscall sys_perf_event_open nr 298 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
syscall sys_recvmmsg nr 299 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
syscall sys_prlimit64 nr 302 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
syscall sys_clock_adjtime nr 305 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
syscall sys_syncfs nr 306 nbargs 1 types: (int) args: (fd)
syscall sys_sendmmsg nr 307 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
syscall sys_setns nr 308 nbargs 2 types: (int, int) args: (fd, nstype)

View file

@ -1,291 +0,0 @@
syscall sys_restart_syscall nr 0 nbargs 0 types: () args: ()
syscall sys_exit nr 1 nbargs 1 types: (int) args: (error_code)
syscall sys_read nr 3 nbargs 3 types: (unsigned int, char *, size_t) args: (fd, buf, count)
syscall sys_write nr 4 nbargs 3 types: (unsigned int, const char *, size_t) args: (fd, buf, count)
syscall sys_open nr 5 nbargs 3 types: (const char *, int, int) args: (filename, flags, mode)
syscall sys_close nr 6 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_waitpid nr 7 nbargs 3 types: (pid_t, int *, int) args: (pid, stat_addr, options)
syscall sys_creat nr 8 nbargs 2 types: (const char *, int) args: (pathname, mode)
syscall sys_link nr 9 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
syscall sys_unlink nr 10 nbargs 1 types: (const char *) args: (pathname)
syscall sys_chdir nr 12 nbargs 1 types: (const char *) args: (filename)
syscall sys_time nr 13 nbargs 1 types: (time_t *) args: (tloc)
syscall sys_mknod nr 14 nbargs 3 types: (const char *, int, unsigned) args: (filename, mode, dev)
syscall sys_chmod nr 15 nbargs 2 types: (const char *, mode_t) args: (filename, mode)
syscall sys_lchown16 nr 16 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
syscall sys_stat nr 18 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
syscall sys_lseek nr 19 nbargs 3 types: (unsigned int, off_t, unsigned int) args: (fd, offset, origin)
syscall sys_getpid nr 20 nbargs 0 types: () args: ()
syscall sys_mount nr 21 nbargs 5 types: (char *, char *, char *, unsigned long, void *) args: (dev_name, dir_name, type, flags, data)
syscall sys_oldumount nr 22 nbargs 1 types: (char *) args: (name)
syscall sys_setuid16 nr 23 nbargs 1 types: (old_uid_t) args: (uid)
syscall sys_getuid16 nr 24 nbargs 0 types: () args: ()
syscall sys_stime nr 25 nbargs 1 types: (time_t *) args: (tptr)
syscall sys_ptrace nr 26 nbargs 4 types: (long, long, unsigned long, unsigned long) args: (request, pid, addr, data)
syscall sys_alarm nr 27 nbargs 1 types: (unsigned int) args: (seconds)
syscall sys_fstat nr 28 nbargs 2 types: (unsigned int, struct __old_kernel_stat *) args: (fd, statbuf)
syscall sys_pause nr 29 nbargs 0 types: () args: ()
syscall sys_utime nr 30 nbargs 2 types: (char *, struct utimbuf *) args: (filename, times)
syscall sys_access nr 33 nbargs 2 types: (const char *, int) args: (filename, mode)
syscall sys_nice nr 34 nbargs 1 types: (int) args: (increment)
syscall sys_sync nr 36 nbargs 0 types: () args: ()
syscall sys_kill nr 37 nbargs 2 types: (pid_t, int) args: (pid, sig)
syscall sys_rename nr 38 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
syscall sys_mkdir nr 39 nbargs 2 types: (const char *, int) args: (pathname, mode)
syscall sys_rmdir nr 40 nbargs 1 types: (const char *) args: (pathname)
syscall sys_dup nr 41 nbargs 1 types: (unsigned int) args: (fildes)
syscall sys_pipe nr 42 nbargs 1 types: (int *) args: (fildes)
syscall sys_times nr 43 nbargs 1 types: (struct tms *) args: (tbuf)
syscall sys_brk nr 45 nbargs 1 types: (unsigned long) args: (brk)
syscall sys_setgid16 nr 46 nbargs 1 types: (old_gid_t) args: (gid)
syscall sys_getgid16 nr 47 nbargs 0 types: () args: ()
syscall sys_signal nr 48 nbargs 2 types: (int, __sighandler_t) args: (sig, handler)
syscall sys_geteuid16 nr 49 nbargs 0 types: () args: ()
syscall sys_getegid16 nr 50 nbargs 0 types: () args: ()
syscall sys_acct nr 51 nbargs 1 types: (const char *) args: (name)
syscall sys_umount nr 52 nbargs 2 types: (char *, int) args: (name, flags)
syscall sys_ioctl nr 54 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
syscall sys_fcntl nr 55 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
syscall sys_setpgid nr 57 nbargs 2 types: (pid_t, pid_t) args: (pid, pgid)
syscall sys_olduname nr 59 nbargs 1 types: (struct oldold_utsname *) args: (name)
syscall sys_umask nr 60 nbargs 1 types: (int) args: (mask)
syscall sys_chroot nr 61 nbargs 1 types: (const char *) args: (filename)
syscall sys_ustat nr 62 nbargs 2 types: (unsigned, struct ustat *) args: (dev, ubuf)
syscall sys_dup2 nr 63 nbargs 2 types: (unsigned int, unsigned int) args: (oldfd, newfd)
syscall sys_getppid nr 64 nbargs 0 types: () args: ()
syscall sys_getpgrp nr 65 nbargs 0 types: () args: ()
syscall sys_setsid nr 66 nbargs 0 types: () args: ()
syscall sys_sgetmask nr 68 nbargs 0 types: () args: ()
syscall sys_ssetmask nr 69 nbargs 1 types: (int) args: (newmask)
syscall sys_setreuid16 nr 70 nbargs 2 types: (old_uid_t, old_uid_t) args: (ruid, euid)
syscall sys_setregid16 nr 71 nbargs 2 types: (old_gid_t, old_gid_t) args: (rgid, egid)
syscall sys_sigpending nr 73 nbargs 1 types: (old_sigset_t *) args: (set)
syscall sys_sethostname nr 74 nbargs 2 types: (char *, int) args: (name, len)
syscall sys_setrlimit nr 75 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
syscall sys_old_getrlimit nr 76 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
syscall sys_getrusage nr 77 nbargs 2 types: (int, struct rusage *) args: (who, ru)
syscall sys_gettimeofday nr 78 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
syscall sys_settimeofday nr 79 nbargs 2 types: (struct timeval *, struct timezone *) args: (tv, tz)
syscall sys_getgroups16 nr 80 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
syscall sys_setgroups16 nr 81 nbargs 2 types: (int, old_gid_t *) args: (gidsetsize, grouplist)
syscall sys_old_select nr 82 nbargs 1 types: (struct sel_arg_struct *) args: (arg)
syscall sys_symlink nr 83 nbargs 2 types: (const char *, const char *) args: (oldname, newname)
syscall sys_lstat nr 84 nbargs 2 types: (const char *, struct __old_kernel_stat *) args: (filename, statbuf)
syscall sys_readlink nr 85 nbargs 3 types: (const char *, char *, int) args: (path, buf, bufsiz)
syscall sys_uselib nr 86 nbargs 1 types: (const char *) args: (library)
syscall sys_swapon nr 87 nbargs 2 types: (const char *, int) args: (specialfile, swap_flags)
syscall sys_reboot nr 88 nbargs 4 types: (int, int, unsigned int, void *) args: (magic1, magic2, cmd, arg)
syscall sys_old_readdir nr 89 nbargs 3 types: (unsigned int, struct old_linux_dirent *, unsigned int) args: (fd, dirent, count)
syscall sys_old_mmap nr 90 nbargs 1 types: (struct mmap_arg_struct *) args: (arg)
syscall sys_munmap nr 91 nbargs 2 types: (unsigned long, size_t) args: (addr, len)
syscall sys_truncate nr 92 nbargs 2 types: (const char *, long) args: (path, length)
syscall sys_ftruncate nr 93 nbargs 2 types: (unsigned int, unsigned long) args: (fd, length)
syscall sys_fchmod nr 94 nbargs 2 types: (unsigned int, mode_t) args: (fd, mode)
syscall sys_fchown16 nr 95 nbargs 3 types: (unsigned int, old_uid_t, old_gid_t) args: (fd, user, group)
syscall sys_getpriority nr 96 nbargs 2 types: (int, int) args: (which, who)
syscall sys_setpriority nr 97 nbargs 3 types: (int, int, int) args: (which, who, niceval)
syscall sys_statfs nr 99 nbargs 2 types: (const char *, struct statfs *) args: (pathname, buf)
syscall sys_fstatfs nr 100 nbargs 2 types: (unsigned int, struct statfs *) args: (fd, buf)
syscall sys_socketcall nr 102 nbargs 2 types: (int, unsigned long *) args: (call, args)
syscall sys_syslog nr 103 nbargs 3 types: (int, char *, int) args: (type, buf, len)
syscall sys_setitimer nr 104 nbargs 3 types: (int, struct itimerval *, struct itimerval *) args: (which, value, ovalue)
syscall sys_getitimer nr 105 nbargs 2 types: (int, struct itimerval *) args: (which, value)
syscall sys_newstat nr 106 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
syscall sys_newlstat nr 107 nbargs 2 types: (const char *, struct stat *) args: (filename, statbuf)
syscall sys_newfstat nr 108 nbargs 2 types: (unsigned int, struct stat *) args: (fd, statbuf)
syscall sys_uname nr 109 nbargs 1 types: (struct old_utsname *) args: (name)
syscall sys_vhangup nr 111 nbargs 0 types: () args: ()
syscall sys_wait4 nr 114 nbargs 4 types: (pid_t, int *, int, struct rusage *) args: (upid, stat_addr, options, ru)
syscall sys_swapoff nr 115 nbargs 1 types: (const char *) args: (specialfile)
syscall sys_sysinfo nr 116 nbargs 1 types: (struct sysinfo *) args: (info)
syscall sys_ipc nr 117 nbargs 6 types: (unsigned int, int, unsigned long, unsigned long, void *, long) args: (call, first, second, third, ptr, fifth)
syscall sys_fsync nr 118 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_setdomainname nr 121 nbargs 2 types: (char *, int) args: (name, len)
syscall sys_newuname nr 122 nbargs 1 types: (struct new_utsname *) args: (name)
syscall sys_adjtimex nr 124 nbargs 1 types: (struct timex *) args: (txc_p)
syscall sys_mprotect nr 125 nbargs 3 types: (unsigned long, size_t, unsigned long) args: (start, len, prot)
syscall sys_sigprocmask nr 126 nbargs 3 types: (int, old_sigset_t *, old_sigset_t *) args: (how, nset, oset)
syscall sys_init_module nr 128 nbargs 3 types: (void *, unsigned long, const char *) args: (umod, len, uargs)
syscall sys_delete_module nr 129 nbargs 2 types: (const char *, unsigned int) args: (name_user, flags)
syscall sys_quotactl nr 131 nbargs 4 types: (unsigned int, const char *, qid_t, void *) args: (cmd, special, id, addr)
syscall sys_getpgid nr 132 nbargs 1 types: (pid_t) args: (pid)
syscall sys_fchdir nr 133 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_bdflush nr 134 nbargs 2 types: (int, long) args: (func, data)
syscall sys_sysfs nr 135 nbargs 3 types: (int, unsigned long, unsigned long) args: (option, arg1, arg2)
syscall sys_personality nr 136 nbargs 1 types: (unsigned int) args: (personality)
syscall sys_setfsuid16 nr 138 nbargs 1 types: (old_uid_t) args: (uid)
syscall sys_setfsgid16 nr 139 nbargs 1 types: (old_gid_t) args: (gid)
syscall sys_llseek nr 140 nbargs 5 types: (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int) args: (fd, offset_high, offset_low, result, origin)
syscall sys_getdents nr 141 nbargs 3 types: (unsigned int, struct linux_dirent *, unsigned int) args: (fd, dirent, count)
syscall sys_select nr 142 nbargs 5 types: (int, fd_set *, fd_set *, fd_set *, struct timeval *) args: (n, inp, outp, exp, tvp)
syscall sys_flock nr 143 nbargs 2 types: (unsigned int, unsigned int) args: (fd, cmd)
syscall sys_msync nr 144 nbargs 3 types: (unsigned long, size_t, int) args: (start, len, flags)
syscall sys_readv nr 145 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
syscall sys_writev nr 146 nbargs 3 types: (unsigned long, const struct iovec *, unsigned long) args: (fd, vec, vlen)
syscall sys_getsid nr 147 nbargs 1 types: (pid_t) args: (pid)
syscall sys_fdatasync nr 148 nbargs 1 types: (unsigned int) args: (fd)
syscall sys_sysctl nr 149 nbargs 1 types: (struct __sysctl_args *) args: (args)
syscall sys_mlock nr 150 nbargs 2 types: (unsigned long, size_t) args: (start, len)
syscall sys_munlock nr 151 nbargs 2 types: (unsigned long, size_t) args: (start, len)
syscall sys_mlockall nr 152 nbargs 1 types: (int) args: (flags)
syscall sys_munlockall nr 153 nbargs 0 types: () args: ()
syscall sys_sched_setparam nr 154 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
syscall sys_sched_getparam nr 155 nbargs 2 types: (pid_t, struct sched_param *) args: (pid, param)
syscall sys_sched_setscheduler nr 156 nbargs 3 types: (pid_t, int, struct sched_param *) args: (pid, policy, param)
syscall sys_sched_getscheduler nr 157 nbargs 1 types: (pid_t) args: (pid)
syscall sys_sched_yield nr 158 nbargs 0 types: () args: ()
syscall sys_sched_get_priority_max nr 159 nbargs 1 types: (int) args: (policy)
syscall sys_sched_get_priority_min nr 160 nbargs 1 types: (int) args: (policy)
syscall sys_sched_rr_get_interval nr 161 nbargs 2 types: (pid_t, struct timespec *) args: (pid, interval)
syscall sys_nanosleep nr 162 nbargs 2 types: (struct timespec *, struct timespec *) args: (rqtp, rmtp)
syscall sys_mremap nr 163 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, old_len, new_len, flags, new_addr)
syscall sys_setresuid16 nr 164 nbargs 3 types: (old_uid_t, old_uid_t, old_uid_t) args: (ruid, euid, suid)
syscall sys_getresuid16 nr 165 nbargs 3 types: (old_uid_t *, old_uid_t *, old_uid_t *) args: (ruid, euid, suid)
syscall sys_poll nr 168 nbargs 3 types: (struct pollfd *, unsigned int, long) args: (ufds, nfds, timeout_msecs)
syscall sys_setresgid16 nr 170 nbargs 3 types: (old_gid_t, old_gid_t, old_gid_t) args: (rgid, egid, sgid)
syscall sys_getresgid16 nr 171 nbargs 3 types: (old_gid_t *, old_gid_t *, old_gid_t *) args: (rgid, egid, sgid)
syscall sys_prctl nr 172 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
syscall sys_rt_sigaction nr 174 nbargs 4 types: (int, const struct sigaction *, struct sigaction *, size_t) args: (sig, act, oact, sigsetsize)
syscall sys_rt_sigprocmask nr 175 nbargs 4 types: (int, sigset_t *, sigset_t *, size_t) args: (how, nset, oset, sigsetsize)
syscall sys_rt_sigpending nr 176 nbargs 2 types: (sigset_t *, size_t) args: (set, sigsetsize)
syscall sys_rt_sigtimedwait nr 177 nbargs 4 types: (const sigset_t *, siginfo_t *, const struct timespec *, size_t) args: (uthese, uinfo, uts, sigsetsize)
syscall sys_rt_sigqueueinfo nr 178 nbargs 3 types: (pid_t, int, siginfo_t *) args: (pid, sig, uinfo)
syscall sys_rt_sigsuspend nr 179 nbargs 2 types: (sigset_t *, size_t) args: (unewset, sigsetsize)
syscall sys_chown16 nr 182 nbargs 3 types: (const char *, old_uid_t, old_gid_t) args: (filename, user, group)
syscall sys_getcwd nr 183 nbargs 2 types: (char *, unsigned long) args: (buf, size)
syscall sys_capget nr 184 nbargs 2 types: (cap_user_header_t, cap_user_data_t) args: (header, dataptr)
syscall sys_capset nr 185 nbargs 2 types: (cap_user_header_t, const cap_user_data_t) args: (header, data)
syscall sys_sendfile nr 187 nbargs 4 types: (int, int, off_t *, size_t) args: (out_fd, in_fd, offset, count)
syscall sys_getrlimit nr 191 nbargs 2 types: (unsigned int, struct rlimit *) args: (resource, rlim)
syscall sys_mmap_pgoff nr 192 nbargs 6 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (addr, len, prot, flags, fd, pgoff)
syscall sys_stat64 nr 195 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
syscall sys_lstat64 nr 196 nbargs 2 types: (const char *, struct stat64 *) args: (filename, statbuf)
syscall sys_fstat64 nr 197 nbargs 2 types: (unsigned long, struct stat64 *) args: (fd, statbuf)
syscall sys_lchown nr 198 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
syscall sys_getuid nr 199 nbargs 0 types: () args: ()
syscall sys_getgid nr 200 nbargs 0 types: () args: ()
syscall sys_geteuid nr 201 nbargs 0 types: () args: ()
syscall sys_getegid nr 202 nbargs 0 types: () args: ()
syscall sys_setreuid nr 203 nbargs 2 types: (uid_t, uid_t) args: (ruid, euid)
syscall sys_setregid nr 204 nbargs 2 types: (gid_t, gid_t) args: (rgid, egid)
syscall sys_getgroups nr 205 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
syscall sys_setgroups nr 206 nbargs 2 types: (int, gid_t *) args: (gidsetsize, grouplist)
syscall sys_fchown nr 207 nbargs 3 types: (unsigned int, uid_t, gid_t) args: (fd, user, group)
syscall sys_setresuid nr 208 nbargs 3 types: (uid_t, uid_t, uid_t) args: (ruid, euid, suid)
syscall sys_getresuid nr 209 nbargs 3 types: (uid_t *, uid_t *, uid_t *) args: (ruid, euid, suid)
syscall sys_setresgid nr 210 nbargs 3 types: (gid_t, gid_t, gid_t) args: (rgid, egid, sgid)
syscall sys_getresgid nr 211 nbargs 3 types: (gid_t *, gid_t *, gid_t *) args: (rgid, egid, sgid)
syscall sys_chown nr 212 nbargs 3 types: (const char *, uid_t, gid_t) args: (filename, user, group)
syscall sys_setuid nr 213 nbargs 1 types: (uid_t) args: (uid)
syscall sys_setgid nr 214 nbargs 1 types: (gid_t) args: (gid)
syscall sys_setfsuid nr 215 nbargs 1 types: (uid_t) args: (uid)
syscall sys_setfsgid nr 216 nbargs 1 types: (gid_t) args: (gid)
syscall sys_pivot_root nr 217 nbargs 2 types: (const char *, const char *) args: (new_root, put_old)
syscall sys_mincore nr 218 nbargs 3 types: (unsigned long, size_t, unsigned char *) args: (start, len, vec)
syscall sys_madvise nr 219 nbargs 3 types: (unsigned long, size_t, int) args: (start, len_in, behavior)
syscall sys_getdents64 nr 220 nbargs 3 types: (unsigned int, struct linux_dirent64 *, unsigned int) args: (fd, dirent, count)
syscall sys_fcntl64 nr 221 nbargs 3 types: (unsigned int, unsigned int, unsigned long) args: (fd, cmd, arg)
syscall sys_gettid nr 224 nbargs 0 types: () args: ()
syscall sys_setxattr nr 226 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
syscall sys_lsetxattr nr 227 nbargs 5 types: (const char *, const char *, const void *, size_t, int) args: (pathname, name, value, size, flags)
syscall sys_fsetxattr nr 228 nbargs 5 types: (int, const char *, const void *, size_t, int) args: (fd, name, value, size, flags)
syscall sys_getxattr nr 229 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
syscall sys_lgetxattr nr 230 nbargs 4 types: (const char *, const char *, void *, size_t) args: (pathname, name, value, size)
syscall sys_fgetxattr nr 231 nbargs 4 types: (int, const char *, void *, size_t) args: (fd, name, value, size)
syscall sys_listxattr nr 232 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
syscall sys_llistxattr nr 233 nbargs 3 types: (const char *, char *, size_t) args: (pathname, list, size)
syscall sys_flistxattr nr 234 nbargs 3 types: (int, char *, size_t) args: (fd, list, size)
syscall sys_removexattr nr 235 nbargs 2 types: (const char *, const char *) args: (pathname, name)
syscall sys_lremovexattr nr 236 nbargs 2 types: (const char *, const char *) args: (pathname, name)
syscall sys_fremovexattr nr 237 nbargs 2 types: (int, const char *) args: (fd, name)
syscall sys_tkill nr 238 nbargs 2 types: (pid_t, int) args: (pid, sig)
syscall sys_sendfile64 nr 239 nbargs 4 types: (int, int, loff_t *, size_t) args: (out_fd, in_fd, offset, count)
syscall sys_futex nr 240 nbargs 6 types: (u32 *, int, u32, struct timespec *, u32 *, u32) args: (uaddr, op, val, utime, uaddr2, val3)
syscall sys_sched_setaffinity nr 241 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
syscall sys_sched_getaffinity nr 242 nbargs 3 types: (pid_t, unsigned int, unsigned long *) args: (pid, len, user_mask_ptr)
syscall sys_io_setup nr 245 nbargs 2 types: (unsigned, aio_context_t *) args: (nr_events, ctxp)
syscall sys_io_destroy nr 246 nbargs 1 types: (aio_context_t) args: (ctx)
syscall sys_io_getevents nr 247 nbargs 5 types: (aio_context_t, long, long, struct io_event *, struct timespec *) args: (ctx_id, min_nr, nr, events, timeout)
syscall sys_io_submit nr 248 nbargs 3 types: (aio_context_t, long, struct iocb * *) args: (ctx_id, nr, iocbpp)
syscall sys_io_cancel nr 249 nbargs 3 types: (aio_context_t, struct iocb *, struct io_event *) args: (ctx_id, iocb, result)
syscall sys_exit_group nr 252 nbargs 1 types: (int) args: (error_code)
syscall sys_epoll_create nr 254 nbargs 1 types: (int) args: (size)
syscall sys_epoll_ctl nr 255 nbargs 4 types: (int, int, int, struct epoll_event *) args: (epfd, op, fd, event)
syscall sys_epoll_wait nr 256 nbargs 4 types: (int, struct epoll_event *, int, int) args: (epfd, events, maxevents, timeout)
syscall sys_remap_file_pages nr 257 nbargs 5 types: (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) args: (start, size, prot, pgoff, flags)
syscall sys_set_tid_address nr 258 nbargs 1 types: (int *) args: (tidptr)
syscall sys_timer_create nr 259 nbargs 3 types: (const clockid_t, struct sigevent *, timer_t *) args: (which_clock, timer_event_spec, created_timer_id)
syscall sys_timer_settime nr 260 nbargs 4 types: (timer_t, int, const struct itimerspec *, struct itimerspec *) args: (timer_id, flags, new_setting, old_setting)
syscall sys_timer_gettime nr 261 nbargs 2 types: (timer_t, struct itimerspec *) args: (timer_id, setting)
syscall sys_timer_getoverrun nr 262 nbargs 1 types: (timer_t) args: (timer_id)
syscall sys_timer_delete nr 263 nbargs 1 types: (timer_t) args: (timer_id)
syscall sys_clock_settime nr 264 nbargs 2 types: (const clockid_t, const struct timespec *) args: (which_clock, tp)
syscall sys_clock_gettime nr 265 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
syscall sys_clock_getres nr 266 nbargs 2 types: (const clockid_t, struct timespec *) args: (which_clock, tp)
syscall sys_clock_nanosleep nr 267 nbargs 4 types: (const clockid_t, int, const struct timespec *, struct timespec *) args: (which_clock, flags, rqtp, rmtp)
syscall sys_statfs64 nr 268 nbargs 3 types: (const char *, size_t, struct statfs64 *) args: (pathname, sz, buf)
syscall sys_fstatfs64 nr 269 nbargs 3 types: (unsigned int, size_t, struct statfs64 *) args: (fd, sz, buf)
syscall sys_tgkill nr 270 nbargs 3 types: (pid_t, pid_t, int) args: (tgid, pid, sig)
syscall sys_utimes nr 271 nbargs 2 types: (char *, struct timeval *) args: (filename, utimes)
syscall sys_mq_open nr 277 nbargs 4 types: (const char *, int, mode_t, struct mq_attr *) args: (u_name, oflag, mode, u_attr)
syscall sys_mq_unlink nr 278 nbargs 1 types: (const char *) args: (u_name)
syscall sys_mq_timedsend nr 279 nbargs 5 types: (mqd_t, const char *, size_t, unsigned int, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, msg_prio, u_abs_timeout)
syscall sys_mq_timedreceive nr 280 nbargs 5 types: (mqd_t, char *, size_t, unsigned int *, const struct timespec *) args: (mqdes, u_msg_ptr, msg_len, u_msg_prio, u_abs_timeout)
syscall sys_mq_notify nr 281 nbargs 2 types: (mqd_t, const struct sigevent *) args: (mqdes, u_notification)
syscall sys_mq_getsetattr nr 282 nbargs 3 types: (mqd_t, const struct mq_attr *, struct mq_attr *) args: (mqdes, u_mqstat, u_omqstat)
syscall sys_kexec_load nr 283 nbargs 4 types: (unsigned long, unsigned long, struct kexec_segment *, unsigned long) args: (entry, nr_segments, segments, flags)
syscall sys_waitid nr 284 nbargs 5 types: (int, pid_t, struct siginfo *, int, struct rusage *) args: (which, upid, infop, options, ru)
syscall sys_add_key nr 286 nbargs 5 types: (const char *, const char *, const void *, size_t, key_serial_t) args: (_type, _description, _payload, plen, ringid)
syscall sys_request_key nr 287 nbargs 4 types: (const char *, const char *, const char *, key_serial_t) args: (_type, _description, _callout_info, destringid)
syscall sys_keyctl nr 288 nbargs 5 types: (int, unsigned long, unsigned long, unsigned long, unsigned long) args: (option, arg2, arg3, arg4, arg5)
syscall sys_ioprio_set nr 289 nbargs 3 types: (int, int, int) args: (which, who, ioprio)
syscall sys_ioprio_get nr 290 nbargs 2 types: (int, int) args: (which, who)
syscall sys_inotify_init nr 291 nbargs 0 types: () args: ()
syscall sys_inotify_add_watch nr 292 nbargs 3 types: (int, const char *, u32) args: (fd, pathname, mask)
syscall sys_inotify_rm_watch nr 293 nbargs 2 types: (int, __s32) args: (fd, wd)
syscall sys_openat nr 295 nbargs 4 types: (int, const char *, int, int) args: (dfd, filename, flags, mode)
syscall sys_mkdirat nr 296 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, mode)
syscall sys_mknodat nr 297 nbargs 4 types: (int, const char *, int, unsigned) args: (dfd, filename, mode, dev)
syscall sys_fchownat nr 298 nbargs 5 types: (int, const char *, uid_t, gid_t, int) args: (dfd, filename, user, group, flag)
syscall sys_futimesat nr 299 nbargs 3 types: (int, const char *, struct timeval *) args: (dfd, filename, utimes)
syscall sys_fstatat64 nr 300 nbargs 4 types: (int, const char *, struct stat64 *, int) args: (dfd, filename, statbuf, flag)
syscall sys_unlinkat nr 301 nbargs 3 types: (int, const char *, int) args: (dfd, pathname, flag)
syscall sys_renameat nr 302 nbargs 4 types: (int, const char *, int, const char *) args: (olddfd, oldname, newdfd, newname)
syscall sys_linkat nr 303 nbargs 5 types: (int, const char *, int, const char *, int) args: (olddfd, oldname, newdfd, newname, flags)
syscall sys_symlinkat nr 304 nbargs 3 types: (const char *, int, const char *) args: (oldname, newdfd, newname)
syscall sys_readlinkat nr 305 nbargs 4 types: (int, const char *, char *, int) args: (dfd, pathname, buf, bufsiz)
syscall sys_fchmodat nr 306 nbargs 3 types: (int, const char *, mode_t) args: (dfd, filename, mode)
syscall sys_faccessat nr 307 nbargs 3 types: (int, const char *, int) args: (dfd, filename, mode)
syscall sys_pselect6 nr 308 nbargs 6 types: (int, fd_set *, fd_set *, fd_set *, struct timespec *, void *) args: (n, inp, outp, exp, tsp, sig)
syscall sys_ppoll nr 309 nbargs 5 types: (struct pollfd *, unsigned int, struct timespec *, const sigset_t *, size_t) args: (ufds, nfds, tsp, sigmask, sigsetsize)
syscall sys_unshare nr 310 nbargs 1 types: (unsigned long) args: (unshare_flags)
syscall sys_set_robust_list nr 311 nbargs 2 types: (struct robust_list_head *, size_t) args: (head, len)
syscall sys_get_robust_list nr 312 nbargs 3 types: (int, struct robust_list_head * *, size_t *) args: (pid, head_ptr, len_ptr)
syscall sys_splice nr 313 nbargs 6 types: (int, loff_t *, int, loff_t *, size_t, unsigned int) args: (fd_in, off_in, fd_out, off_out, len, flags)
syscall sys_tee nr 315 nbargs 4 types: (int, int, size_t, unsigned int) args: (fdin, fdout, len, flags)
syscall sys_vmsplice nr 316 nbargs 4 types: (int, const struct iovec *, unsigned long, unsigned int) args: (fd, iov, nr_segs, flags)
syscall sys_getcpu nr 318 nbargs 3 types: (unsigned *, unsigned *, struct getcpu_cache *) args: (cpup, nodep, unused)
syscall sys_epoll_pwait nr 319 nbargs 6 types: (int, struct epoll_event *, int, int, const sigset_t *, size_t) args: (epfd, events, maxevents, timeout, sigmask, sigsetsize)
syscall sys_utimensat nr 320 nbargs 4 types: (int, const char *, struct timespec *, int) args: (dfd, filename, utimes, flags)
syscall sys_signalfd nr 321 nbargs 3 types: (int, sigset_t *, size_t) args: (ufd, user_mask, sizemask)
syscall sys_timerfd_create nr 322 nbargs 2 types: (int, int) args: (clockid, flags)
syscall sys_eventfd nr 323 nbargs 1 types: (unsigned int) args: (count)
syscall sys_timerfd_settime nr 325 nbargs 4 types: (int, int, const struct itimerspec *, struct itimerspec *) args: (ufd, flags, utmr, otmr)
syscall sys_timerfd_gettime nr 326 nbargs 2 types: (int, struct itimerspec *) args: (ufd, otmr)
syscall sys_signalfd4 nr 327 nbargs 4 types: (int, sigset_t *, size_t, int) args: (ufd, user_mask, sizemask, flags)
syscall sys_eventfd2 nr 328 nbargs 2 types: (unsigned int, int) args: (count, flags)
syscall sys_epoll_create1 nr 329 nbargs 1 types: (int) args: (flags)
syscall sys_dup3 nr 330 nbargs 3 types: (unsigned int, unsigned int, int) args: (oldfd, newfd, flags)
syscall sys_pipe2 nr 331 nbargs 2 types: (int *, int) args: (fildes, flags)
syscall sys_inotify_init1 nr 332 nbargs 1 types: (int) args: (flags)
syscall sys_preadv nr 333 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
syscall sys_pwritev nr 334 nbargs 5 types: (unsigned long, const struct iovec *, unsigned long, unsigned long, unsigned long) args: (fd, vec, vlen, pos_l, pos_h)
syscall sys_rt_tgsigqueueinfo nr 335 nbargs 4 types: (pid_t, pid_t, int, siginfo_t *) args: (tgid, pid, sig, uinfo)
syscall sys_perf_event_open nr 336 nbargs 5 types: (struct perf_event_attr *, pid_t, int, int, unsigned long) args: (attr_uptr, pid, cpu, group_fd, flags)
syscall sys_recvmmsg nr 337 nbargs 5 types: (int, struct mmsghdr *, unsigned int, unsigned int, struct timespec *) args: (fd, mmsg, vlen, flags, timeout)
syscall sys_fanotify_init nr 338 nbargs 2 types: (unsigned int, unsigned int) args: (flags, event_f_flags)
syscall sys_prlimit64 nr 340 nbargs 4 types: (pid_t, unsigned int, const struct rlimit64 *, struct rlimit64 *) args: (pid, resource, new_rlim, old_rlim)
syscall sys_clock_adjtime nr 343 nbargs 2 types: (const clockid_t, struct timex *) args: (which_clock, utx)
syscall sys_syncfs nr 344 nbargs 1 types: (int) args: (fd)
syscall sys_sendmmsg nr 345 nbargs 4 types: (int, struct mmsghdr *, unsigned int, unsigned int) args: (fd, mmsg, vlen, flags)
syscall sys_setns nr 346 nbargs 2 types: (int, int) args: (fd, nstype)

View file

@@ -1,18 +0,0 @@
LTTng system call tracing
1) lttng-syscall-extractor
You need to build a kernel with CONFIG_FTRACE_SYSCALLS=y and
CONFIG_KALLSYMS_ALL=y for extraction. Apply the linker patch to get your
kernel to keep the system call metadata after boot. Then build and load
the LTTng syscall extractor module. The module will fail to load (this
is expected). See the dmesg output for system call metadata.
2) Generate system call TRACE_EVENT().
Take the dmesg metadata and feed it to lttng-syscalls-generate-headers.sh (run,
e.g., from the instrumentation/syscalls directory). See the script header for a
usage example.
After these headers are created, we only need to track newly added system calls;
there is no need to regenerate the whole set, since system calls are only ever
appended.

View file

@@ -1,3 +0,0 @@
#ifdef CONFIG_X86_64
#include "x86-32-syscalls-3.1.0-rc6_integers.h"
#endif

View file

@@ -1,3 +0,0 @@
#ifdef CONFIG_X86_64
#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
#endif

View file

@@ -1,7 +0,0 @@
#ifdef CONFIG_X86_64
#include "x86-64-syscalls-3.0.4_integers.h"
#endif
#ifdef CONFIG_X86_32
#include "x86-32-syscalls-3.1.0-rc6_integers.h"
#endif

View file

@@ -1,14 +0,0 @@
#define OVERRIDE_32_sys_mmap
#define OVERRIDE_64_sys_mmap
#ifndef CREATE_SYSCALL_TABLE
SC_TRACE_EVENT(sys_mmap,
TP_PROTO(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off),
TP_ARGS(addr, len, prot, flags, fd, off),
TP_STRUCT__entry(__field_hex(unsigned long, addr) __field(size_t, len) __field(int, prot) __field(int, flags) __field(int, fd) __field(off_t, offset)),
TP_fast_assign(tp_assign(addr, addr) tp_assign(len, len) tp_assign(prot, prot) tp_assign(flags, flags) tp_assign(fd, fd) tp_assign(offset, off)),
TP_printk()
)
#endif /* CREATE_SYSCALL_TABLE */

View file

@@ -1,7 +0,0 @@
#ifdef CONFIG_X86_64
#include "x86-64-syscalls-3.0.4_pointers.h"
#endif
#ifdef CONFIG_X86_32
#include "x86-32-syscalls-3.1.0-rc6_pointers.h"
#endif

View file

@@ -1,4 +0,0 @@
/*
* This is a place-holder for override defines for system calls with
* pointers (all architectures).
*/

View file

@@ -1,55 +0,0 @@
#if !defined(_TRACE_SYSCALLS_UNKNOWN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SYSCALLS_UNKNOWN_H
#include <linux/tracepoint.h>
#include <linux/syscalls.h>
#define UNKNOWN_SYSCALL_NRARGS 6
TRACE_EVENT(sys_unknown,
TP_PROTO(unsigned int id, unsigned long *args),
TP_ARGS(id, args),
TP_STRUCT__entry(
__field(unsigned int, id)
__array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
),
TP_fast_assign(
tp_assign(id, id)
tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
),
TP_printk()
)
TRACE_EVENT(compat_sys_unknown,
TP_PROTO(unsigned int id, unsigned long *args),
TP_ARGS(id, args),
TP_STRUCT__entry(
__field(unsigned int, id)
__array(unsigned long, args, UNKNOWN_SYSCALL_NRARGS)
),
TP_fast_assign(
tp_assign(id, id)
tp_memcpy(args, args, UNKNOWN_SYSCALL_NRARGS * sizeof(*args))
),
TP_printk()
)
/*
* This is going to hook on sys_exit in the kernel.
* We change the name so we don't clash with the sys_exit syscall entry
* event.
*/
TRACE_EVENT(exit_syscall,
TP_PROTO(struct pt_regs *regs, long ret),
TP_ARGS(regs, ret),
TP_STRUCT__entry(
__field(long, ret)
),
TP_fast_assign(
tp_assign(ret, ret)
),
TP_printk()
)
#endif /* _TRACE_SYSCALLS_UNKNOWN_H */
/* This part must be outside protection */
#include "../../../probes/define_trace.h"

View file

@@ -1,38 +0,0 @@
#ifndef CONFIG_UID16
#define OVERRIDE_32_sys_getuid16
#define OVERRIDE_32_sys_getgid16
#define OVERRIDE_32_sys_geteuid16
#define OVERRIDE_32_sys_getegid16
#define OVERRIDE_32_sys_setuid16
#define OVERRIDE_32_sys_setgid16
#define OVERRIDE_32_sys_setfsuid16
#define OVERRIDE_32_sys_setfsgid16
#define OVERRIDE_32_sys_setreuid16
#define OVERRIDE_32_sys_setregid16
#define OVERRIDE_32_sys_fchown16
#define OVERRIDE_32_sys_setresuid16
#define OVERRIDE_32_sys_setresgid16
#define OVERRIDE_TABLE_32_sys_getuid16
#define OVERRIDE_TABLE_32_sys_getgid16
#define OVERRIDE_TABLE_32_sys_geteuid16
#define OVERRIDE_TABLE_32_sys_getegid16
#define OVERRIDE_TABLE_32_sys_setuid16
#define OVERRIDE_TABLE_32_sys_setgid16
#define OVERRIDE_TABLE_32_sys_setreuid16
#define OVERRIDE_TABLE_32_sys_setregid16
#define OVERRIDE_TABLE_32_sys_fchown16
#define OVERRIDE_TABLE_32_sys_setfsuid16
#define OVERRIDE_TABLE_32_sys_setfsgid16
#define OVERRIDE_TABLE_32_sys_setresuid16
#define OVERRIDE_TABLE_32_sys_setresgid16
#endif
#ifdef CREATE_SYSCALL_TABLE
#define OVERRIDE_TABLE_32_sys_mmap
TRACE_SYSCALL_TABLE(sys_mmap, sys_mmap, 90, 6)
#endif

View file

@@ -1,17 +0,0 @@
#ifndef CONFIG_UID16
#define OVERRIDE_32_sys_getgroups16
#define OVERRIDE_32_sys_setgroups16
#define OVERRIDE_32_sys_lchown16
#define OVERRIDE_32_sys_getresuid16
#define OVERRIDE_32_sys_getresgid16
#define OVERRIDE_32_sys_chown16
#define OVERRIDE_TABLE_32_sys_getgroups16
#define OVERRIDE_TABLE_32_sys_setgroups16
#define OVERRIDE_TABLE_32_sys_lchown16
#define OVERRIDE_TABLE_32_sys_getresuid16
#define OVERRIDE_TABLE_32_sys_getresgid16
#define OVERRIDE_TABLE_32_sys_chown16
#endif

View file

@@ -1,3 +0,0 @@
/*
* This is a place-holder for x86_64 integer syscall definition override.
*/

View file

@@ -1,5 +0,0 @@
#ifndef CREATE_SYSCALL_TABLE
#else /* CREATE_SYSCALL_TABLE */
#endif /* CREATE_SYSCALL_TABLE */

View file

@@ -1 +0,0 @@
obj-m += lttng-syscalls-extractor.o

View file

@@ -1,85 +0,0 @@
/*
* Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
*
* Dump syscall metadata to console.
*
* GPLv2 license.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>
#include <linux/dcache.h>
#include <linux/ftrace_event.h>
#include <trace/syscall.h>
#ifndef CONFIG_FTRACE_SYSCALLS
#error "You need to set CONFIG_FTRACE_SYSCALLS=y"
#endif
#ifndef CONFIG_KALLSYMS_ALL
#error "You need to set CONFIG_KALLSYMS_ALL=y"
#endif
static struct syscall_metadata **__start_syscalls_metadata;
static struct syscall_metadata **__stop_syscalls_metadata;
static __init
struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata **iter;
for (iter = __start_syscalls_metadata;
iter < __stop_syscalls_metadata; iter++) {
if ((*iter)->syscall_nr == syscall)
return (*iter);
}
return NULL;
}
int init_module(void)
{
struct syscall_metadata *meta;
int i;
__start_syscalls_metadata = (void *) kallsyms_lookup_name("__start_syscalls_metadata");
__stop_syscalls_metadata = (void *) kallsyms_lookup_name("__stop_syscalls_metadata");
for (i = 0; i < NR_syscalls; i++) {
int j;
meta = find_syscall_meta(i);
if (!meta)
continue;
printk("syscall %s nr %d nbargs %d ",
meta->name, meta->syscall_nr, meta->nb_args);
printk("types: (");
for (j = 0; j < meta->nb_args; j++) {
if (j > 0)
printk(", ");
printk("%s", meta->types[j]);
}
printk(") ");
printk("args: (");
for (j = 0; j < meta->nb_args; j++) {
if (j > 0)
printk(", ");
printk("%s", meta->args[j]);
}
printk(")\n");
}
printk("SUCCESS\n");
return -1;
}
void cleanup_module(void)
{
}
MODULE_LICENSE("GPL");

View file

@@ -1,275 +0,0 @@
#!/bin/sh
# Generate system call probe description macros from syscall metadata dump file.
# example usage:
#
# lttng-syscalls-generate-headers.sh integers 3.0.4 x86-64-syscalls-3.0.4 64
# lttng-syscalls-generate-headers.sh pointers 3.0.4 x86-64-syscalls-3.0.4 64
CLASS=$1
INPUTDIR=$2
INPUTFILE=$3
BITNESS=$4
INPUT=${INPUTDIR}/${INPUTFILE}
SRCFILE=gen.tmp.0
TMPFILE=gen.tmp.1
HEADER=headers/${INPUTFILE}_${CLASS}.h
cp ${INPUT} ${SRCFILE}
#Cleanup
perl -p -e 's/^\[.*\] //g' ${SRCFILE} > ${TMPFILE}
mv ${TMPFILE} ${SRCFILE}
perl -p -e 's/^syscall sys_([^ ]*)/syscall $1/g' ${SRCFILE} > ${TMPFILE}
mv ${TMPFILE} ${SRCFILE}
#Filter
if [ "$CLASS" = integers ]; then
#select integers and no-args.
CLASSCAP=INTEGERS
grep -v "\\*\|cap_user_header_t" ${SRCFILE} > ${TMPFILE}
mv ${TMPFILE} ${SRCFILE}
fi
if [ "$CLASS" = pointers ]; then
#select system calls using pointers.
CLASSCAP=POINTERS
grep "\\*\|cap_#user_header_t" ${SRCFILE} > ${TMPFILE}
mv ${TMPFILE} ${SRCFILE}
fi
echo "/* THIS FILE IS AUTO-GENERATED. DO NOT EDIT */" > ${HEADER}
echo \
"#ifndef CREATE_SYSCALL_TABLE
#if !defined(_TRACE_SYSCALLS_${CLASSCAP}_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SYSCALLS_${CLASSCAP}_H
#include <linux/tracepoint.h>
#include <linux/syscalls.h>
#include \"${INPUTFILE}_${CLASS}_override.h\"
#include \"syscalls_${CLASS}_override.h\"
" >> ${HEADER}
if [ "$CLASS" = integers ]; then
NRARGS=0
echo \
'SC_DECLARE_EVENT_CLASS_NOARGS(syscalls_noargs,\n'\
' TP_STRUCT__entry(),\n'\
' TP_fast_assign(),\n'\
' TP_printk()\n'\
')'\
>> ${HEADER}
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^)]*)\) '\
'args: \(([^)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_DEFINE_EVENT_NOARGS(syscalls_noargs, sys_$1)\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
fi
# types: 4
# args 5
NRARGS=1
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^)]*)\) '\
'args: \(([^)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_TRACE_EVENT(sys_$1,\n'\
' TP_PROTO($4 $5),\n'\
' TP_ARGS($5),\n'\
' TP_STRUCT__entry(__field($4, $5)),\n'\
' TP_fast_assign(tp_assign($4, $5, $5)),\n'\
' TP_printk()\n'\
')\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
# types: 4 5
# args 6 7
NRARGS=2
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^,]*), ([^)]*)\) '\
'args: \(([^,]*), ([^)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_TRACE_EVENT(sys_$1,\n'\
' TP_PROTO($4 $6, $5 $7),\n'\
' TP_ARGS($6, $7),\n'\
' TP_STRUCT__entry(__field($4, $6) __field($5, $7)),\n'\
' TP_fast_assign(tp_assign($4, $6, $6) tp_assign($5, $7, $7)),\n'\
' TP_printk()\n'\
')\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
# types: 4 5 6
# args 7 8 9
NRARGS=3
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^,]*), ([^,]*), ([^)]*)\) '\
'args: \(([^,]*), ([^,]*), ([^)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_TRACE_EVENT(sys_$1,\n'\
' TP_PROTO($4 $7, $5 $8, $6 $9),\n'\
' TP_ARGS($7, $8, $9),\n'\
' TP_STRUCT__entry(__field($4, $7) __field($5, $8) __field($6, $9)),\n'\
' TP_fast_assign(tp_assign($4, $7, $7) tp_assign($5, $8, $8) tp_assign($6, $9, $9)),\n'\
' TP_printk()\n'\
')\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
# types: 4 5 6 7
# args 8 9 10 11
NRARGS=4
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
'args: \(([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_TRACE_EVENT(sys_$1,\n'\
' TP_PROTO($4 $8, $5 $9, $6 $10, $7 $11),\n'\
' TP_ARGS($8, $9, $10, $11),\n'\
' TP_STRUCT__entry(__field($4, $8) __field($5, $9) __field($6, $10) __field($7, $11)),\n'\
' TP_fast_assign(tp_assign($4, $8, $8) tp_assign($5, $9, $9) tp_assign($6, $10, $10) tp_assign($7, $11, $11)),\n'\
' TP_printk()\n'\
')\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
# types: 4 5 6 7 8
# args 9 10 11 12 13
NRARGS=5
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\) '\
'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_TRACE_EVENT(sys_$1,\n'\
' TP_PROTO($4 $9, $5 $10, $6 $11, $7 $12, $8 $13),\n'\
' TP_ARGS($9, $10, $11, $12, $13),\n'\
' TP_STRUCT__entry(__field($4, $9) __field($5, $10) __field($6, $11) __field($7, $12) __field($8, $13)),\n'\
' TP_fast_assign(tp_assign($4, $9, $9) tp_assign($5, $10, $10) tp_assign($6, $11, $11) tp_assign($7, $12, $12) tp_assign($8, $13, $13)),\n'\
' TP_printk()\n'\
')\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
# types: 4 5 6 7 8 9
# args 10 11 12 13 14 15
NRARGS=6
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) '\
'types: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\) '\
'args: \(([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^,]*), ([^\)]*)\)/'\
'#ifndef OVERRIDE_'"${BITNESS}"'_sys_$1\n'\
'SC_TRACE_EVENT(sys_$1,\n'\
' TP_PROTO($4 $10, $5 $11, $6 $12, $7 $13, $8 $14, $9 $15),\n'\
' TP_ARGS($10, $11, $12, $13, $14, $15),\n'\
' TP_STRUCT__entry(__field($4, $10) __field($5, $11) __field($6, $12) __field($7, $13) __field($8, $14) __field($9, $15)),\n'\
' TP_fast_assign(tp_assign($4, $10, $10) tp_assign($5, $11, $11) tp_assign($6, $12, $12) tp_assign($7, $13, $13) tp_assign($8, $14, $14) tp_assign($9, $15, $15)),\n'\
' TP_printk()\n'\
')\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
# Macro for tracing syscall table
rm -f ${TMPFILE}
for NRARGS in $(seq 0 6); do
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} >> ${TMPFILE}
done
echo \
"
#endif /* _TRACE_SYSCALLS_${CLASSCAP}_H */
/* This part must be outside protection */
#include \"../../../probes/define_trace.h\"
#else /* CREATE_SYSCALL_TABLE */
#include \"${INPUTFILE}_${CLASS}_override.h\"
#include \"syscalls_${CLASS}_override.h\"
" >> ${HEADER}
NRARGS=0
if [ "$CLASS" = integers ]; then
#noargs
grep "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
'TRACE_SYSCALL_TABLE\(syscalls_noargs, sys_$1, $2, $3\)\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
fi
#others.
grep -v "^syscall [^ ]* nr [^ ]* nbargs ${NRARGS} " ${SRCFILE} > ${TMPFILE}
perl -p -e 's/^syscall ([^ ]*) nr ([^ ]*) nbargs ([^ ]*) .*$/'\
'#ifndef OVERRIDE_TABLE_'"${BITNESS}"'_sys_$1\n'\
'TRACE_SYSCALL_TABLE(sys_$1, sys_$1, $2, $3)\n'\
'#endif/g'\
${TMPFILE} >> ${HEADER}
echo -n \
"
#endif /* CREATE_SYSCALL_TABLE */
" >> ${HEADER}
#field names: ...char * type with *name* or *file* or *path* or *root*
# or *put_old* or *type*
cp -f ${HEADER} ${TMPFILE}
rm -f ${HEADER}
perl -p -e 's/__field\(([^,)]*char \*), ([^\)]*)(name|file|path|root|put_old|type)([^\)]*)\)/__string_from_user($2$3$4, $2$3$4)/g'\
${TMPFILE} >> ${HEADER}
cp -f ${HEADER} ${TMPFILE}
rm -f ${HEADER}
perl -p -e 's/tp_assign\(([^,)]*char \*), ([^,]*)(name|file|path|root|put_old|type)([^,]*), ([^\)]*)\)/tp_copy_string_from_user($2$3$4, $5)/g'\
${TMPFILE} >> ${HEADER}
#prettify addresses heuristics.
#field names with addr or ptr
cp -f ${HEADER} ${TMPFILE}
rm -f ${HEADER}
perl -p -e 's/__field\(([^,)]*), ([^,)]*addr|[^,)]*ptr)([^),]*)\)/__field_hex($1, $2$3)/g'\
${TMPFILE} >> ${HEADER}
#field types ending with '*'
cp -f ${HEADER} ${TMPFILE}
rm -f ${HEADER}
perl -p -e 's/__field\(([^,)]*\*), ([^),]*)\)/__field_hex($1, $2)/g'\
${TMPFILE} >> ${HEADER}
#strip the extra type information from tp_assign.
cp -f ${HEADER} ${TMPFILE}
rm -f ${HEADER}
perl -p -e 's/tp_assign\(([^,)]*), ([^,]*), ([^\)]*)\)/tp_assign($2, $3)/g'\
${TMPFILE} >> ${HEADER}
rm -f ${INPUTFILE}.tmp
rm -f ${TMPFILE}
rm -f ${SRCFILE}
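As a worked example (illustrative only, not taken from the generated headers themselves): feeding the sys_kill line from the metadata dump above through this script with CLASS=integers and BITNESS=32 would produce, after the post-processing passes, roughly the following probe definition (indentation approximate), plus a matching TRACE_SYSCALL_TABLE(sys_kill, sys_kill, 37, 2) entry in the CREATE_SYSCALL_TABLE section:

#ifndef OVERRIDE_32_sys_kill
SC_TRACE_EVENT(sys_kill,
        TP_PROTO(pid_t pid, int sig),
        TP_ARGS(pid, sig),
        TP_STRUCT__entry(__field(pid_t, pid) __field(int, sig)),
        TP_fast_assign(tp_assign(pid, pid) tp_assign(sig, sig)),
        TP_printk()
)
#endif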

View file

@@ -1,11 +0,0 @@
obj-m += lib-ring-buffer.o
lib-ring-buffer-objs := \
ringbuffer/ring_buffer_backend.o \
ringbuffer/ring_buffer_frontend.o \
ringbuffer/ring_buffer_iterator.o \
ringbuffer/ring_buffer_vfs.o \
ringbuffer/ring_buffer_splice.o \
ringbuffer/ring_buffer_mmap.o \
prio_heap/lttng_prio_heap.o \
../wrapper/splice.o

View file

@@ -1,61 +0,0 @@
#ifndef _LTTNG_ALIGN_H
#define _LTTNG_ALIGN_H
/*
* lib/align.h
*
* (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#ifdef __KERNEL__
#include <linux/types.h>
#include "bug.h"
#define ALIGN_FLOOR(x, a) __ALIGN_FLOOR_MASK(x, (typeof(x)) (a) - 1)
#define __ALIGN_FLOOR_MASK(x, mask) ((x) & ~(mask))
#define PTR_ALIGN_FLOOR(p, a) \
((typeof(p)) ALIGN_FLOOR((unsigned long) (p), a))
/*
* Align pointer on natural object alignment.
*/
#define object_align(obj) PTR_ALIGN(obj, __alignof__(*(obj)))
#define object_align_floor(obj) PTR_ALIGN_FLOOR(obj, __alignof__(*(obj)))
/**
* offset_align - Calculate the offset needed to align an object on its natural
* alignment towards higher addresses.
* @align_drift: object offset from an "alignment"-aligned address.
* @alignment: natural object alignment. Must be non-zero, power of 2.
*
* Returns the offset that must be added to align towards higher
* addresses.
*/
#define offset_align(align_drift, alignment) \
({ \
BUILD_RUNTIME_BUG_ON((alignment) == 0 \
|| ((alignment) & ((alignment) - 1))); \
(((alignment) - (align_drift)) & ((alignment) - 1)); \
})
/**
* offset_align_floor - Calculate the offset needed to align an object
* on its natural alignment towards lower addresses.
* @align_drift: object offset from an "alignment"-aligned address.
* @alignment: natural object alignment. Must be non-zero, power of 2.
*
* Returns the offset that must be subtracted to align towards lower addresses.
*/
#define offset_align_floor(align_drift, alignment) \
({ \
BUILD_RUNTIME_BUG_ON((alignment) == 0 \
|| ((alignment) & ((alignment) - 1))); \
(((align_drift) - (alignment)) & ((alignment) - 1)); \
})
#endif /* __KERNEL__ */
#endif
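A minimal usage sketch for the alignment helpers (illustrative only; align_example is not part of the removed tree):

static size_t align_example(void)
{
        size_t offset = 13;

        /* offset_align(13 % 8, 8) == (8 - 5) & 7 == 3, so the next
         * 8-byte-aligned offset is 13 + 3 == 16. */
        offset += offset_align(offset % 8, 8);
        return offset;
}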

View file

@@ -1,400 +0,0 @@
#ifndef _BABELTRACE_BITFIELD_H
#define _BABELTRACE_BITFIELD_H
/*
* BabelTrace
*
* Bitfields read/write functions.
*
* Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
#include "../ltt-endian.h"
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif
/* We can't shift an int by its full width: >> 32 and << 32 on a 32-bit int are undefined */
#define _bt_piecewise_rshift(_v, _shift) \
({ \
typeof(_v) ___v = (_v); \
typeof(_shift) ___shift = (_shift); \
unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
\
for (; sb; sb--) \
___v >>= sizeof(___v) * CHAR_BIT - 1; \
___v >>= final; \
})
#define _bt_piecewise_lshift(_v, _shift) \
({ \
typeof(_v) ___v = (_v); \
typeof(_shift) ___shift = (_shift); \
unsigned long sb = (___shift) / (sizeof(___v) * CHAR_BIT - 1); \
unsigned long final = (___shift) % (sizeof(___v) * CHAR_BIT - 1); \
\
for (; sb; sb--) \
___v <<= sizeof(___v) * CHAR_BIT - 1; \
___v <<= final; \
})
#define _bt_is_signed_type(type) (((type)(-1)) < 0)
#define _bt_unsigned_cast(type, v) \
({ \
(sizeof(v) < sizeof(type)) ? \
((type) (v)) & (~(~(type) 0 << (sizeof(v) * CHAR_BIT))) : \
(type) (v); \
})
/*
* bt_bitfield_write - write integer to a bitfield in native endianness
*
* Save integer to the bitfield, which starts at the "start" bit, has "len"
* bits.
* The inside of a bitfield is from high bits to low bits.
* Uses native endianness.
* For unsigned "v", pad MSB with 0 if bitfield is larger than v.
* For signed "v", sign-extend v if bitfield is larger than v.
*
* On little endian, bytes are placed from the least significant to the most
* significant. Also, consecutive bitfields are placed from lower bits to higher
* bits.
*
* On big endian, bytes are placed from the most significant to the least
* significant. Also, consecutive bitfields are placed from higher to lower bits.
*/
#define _bt_bitfield_write_le(_ptr, type, _start, _length, _v) \
do { \
typeof(_v) __v = (_v); \
type *__ptr = (void *) (_ptr); \
unsigned long __start = (_start), __length = (_length); \
type mask, cmask; \
unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
unsigned long start_unit, end_unit, this_unit; \
unsigned long end, cshift; /* cshift is "complement shift" */ \
\
if (!__length) \
break; \
\
end = __start + __length; \
start_unit = __start / ts; \
end_unit = (end + (ts - 1)) / ts; \
\
/* Trim v high bits */ \
if (__length < sizeof(__v) * CHAR_BIT) \
__v &= ~((~(typeof(__v)) 0) << __length); \
\
/* We can now append v with a simple "or", shift it piece-wise */ \
this_unit = start_unit; \
if (start_unit == end_unit - 1) { \
mask = ~((~(type) 0) << (__start % ts)); \
if (end % ts) \
mask |= (~(type) 0) << (end % ts); \
cmask = (type) __v << (__start % ts); \
cmask &= ~mask; \
__ptr[this_unit] &= mask; \
__ptr[this_unit] |= cmask; \
break; \
} \
if (__start % ts) { \
cshift = __start % ts; \
mask = ~((~(type) 0) << cshift); \
cmask = (type) __v << cshift; \
cmask &= ~mask; \
__ptr[this_unit] &= mask; \
__ptr[this_unit] |= cmask; \
__v = _bt_piecewise_rshift(__v, ts - cshift); \
__start += ts - cshift; \
this_unit++; \
} \
for (; this_unit < end_unit - 1; this_unit++) { \
__ptr[this_unit] = (type) __v; \
__v = _bt_piecewise_rshift(__v, ts); \
__start += ts; \
} \
if (end % ts) { \
mask = (~(type) 0) << (end % ts); \
cmask = (type) __v; \
cmask &= ~mask; \
__ptr[this_unit] &= mask; \
__ptr[this_unit] |= cmask; \
} else \
__ptr[this_unit] = (type) __v; \
} while (0)
#define _bt_bitfield_write_be(_ptr, type, _start, _length, _v) \
do { \
typeof(_v) __v = (_v); \
type *__ptr = (void *) (_ptr); \
unsigned long __start = (_start), __length = (_length); \
type mask, cmask; \
unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
unsigned long start_unit, end_unit, this_unit; \
unsigned long end, cshift; /* cshift is "complement shift" */ \
\
if (!__length) \
break; \
\
end = __start + __length; \
start_unit = __start / ts; \
end_unit = (end + (ts - 1)) / ts; \
\
/* Trim v high bits */ \
if (__length < sizeof(__v) * CHAR_BIT) \
__v &= ~((~(typeof(__v)) 0) << __length); \
\
/* We can now append v with a simple "or", shift it piece-wise */ \
this_unit = end_unit - 1; \
if (start_unit == end_unit - 1) { \
mask = ~((~(type) 0) << ((ts - (end % ts)) % ts)); \
if (__start % ts) \
mask |= (~((type) 0)) << (ts - (__start % ts)); \
cmask = (type) __v << ((ts - (end % ts)) % ts); \
cmask &= ~mask; \
__ptr[this_unit] &= mask; \
__ptr[this_unit] |= cmask; \
break; \
} \
if (end % ts) { \
cshift = end % ts; \
mask = ~((~(type) 0) << (ts - cshift)); \
cmask = (type) __v << (ts - cshift); \
cmask &= ~mask; \
__ptr[this_unit] &= mask; \
__ptr[this_unit] |= cmask; \
__v = _bt_piecewise_rshift(__v, cshift); \
end -= cshift; \
this_unit--; \
} \
for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
__ptr[this_unit] = (type) __v; \
__v = _bt_piecewise_rshift(__v, ts); \
end -= ts; \
} \
if (__start % ts) { \
mask = (~(type) 0) << (ts - (__start % ts)); \
cmask = (type) __v; \
cmask &= ~mask; \
__ptr[this_unit] &= mask; \
__ptr[this_unit] |= cmask; \
} else \
__ptr[this_unit] = (type) __v; \
} while (0)
/*
* bt_bitfield_write - write integer to a bitfield in native endianness
* bt_bitfield_write_le - write integer to a bitfield in little endian
* bt_bitfield_write_be - write integer to a bitfield in big endian
*/
#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#define bt_bitfield_write(ptr, type, _start, _length, _v) \
_bt_bitfield_write_le(ptr, type, _start, _length, _v)
#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
_bt_bitfield_write_le(ptr, type, _start, _length, _v)
#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
_bt_bitfield_write_be(ptr, unsigned char, _start, _length, _v)
#elif (__BYTE_ORDER == __BIG_ENDIAN)
#define bt_bitfield_write(ptr, type, _start, _length, _v) \
_bt_bitfield_write_be(ptr, type, _start, _length, _v)
#define bt_bitfield_write_le(ptr, type, _start, _length, _v) \
_bt_bitfield_write_le(ptr, unsigned char, _start, _length, _v)
#define bt_bitfield_write_be(ptr, type, _start, _length, _v) \
_bt_bitfield_write_be(ptr, type, _start, _length, _v)
#else /* (BYTE_ORDER == PDP_ENDIAN) */
#error "Byte order not supported"
#endif
#define _bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
do { \
typeof(*(_vptr)) *__vptr = (_vptr); \
typeof(*__vptr) __v; \
type *__ptr = (void *) (_ptr); \
unsigned long __start = (_start), __length = (_length); \
type mask, cmask; \
unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
unsigned long start_unit, end_unit, this_unit; \
unsigned long end, cshift; /* cshift is "complement shift" */ \
\
if (!__length) { \
*__vptr = 0; \
break; \
} \
\
end = __start + __length; \
start_unit = __start / ts; \
end_unit = (end + (ts - 1)) / ts; \
\
this_unit = end_unit - 1; \
if (_bt_is_signed_type(typeof(__v)) \
&& (__ptr[this_unit] & ((type) 1 << ((end % ts ? : ts) - 1)))) \
__v = ~(typeof(__v)) 0; \
else \
__v = 0; \
if (start_unit == end_unit - 1) { \
cmask = __ptr[this_unit]; \
cmask >>= (__start % ts); \
if ((end - __start) % ts) { \
mask = ~((~(type) 0) << (end - __start)); \
cmask &= mask; \
} \
__v = _bt_piecewise_lshift(__v, end - __start); \
__v |= _bt_unsigned_cast(typeof(__v), cmask); \
*__vptr = __v; \
break; \
} \
if (end % ts) { \
cshift = end % ts; \
mask = ~((~(type) 0) << cshift); \
cmask = __ptr[this_unit]; \
cmask &= mask; \
__v = _bt_piecewise_lshift(__v, cshift); \
__v |= _bt_unsigned_cast(typeof(__v), cmask); \
end -= cshift; \
this_unit--; \
} \
for (; (long) this_unit >= (long) start_unit + 1; this_unit--) { \
__v = _bt_piecewise_lshift(__v, ts); \
__v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
end -= ts; \
} \
if (__start % ts) { \
mask = ~((~(type) 0) << (ts - (__start % ts))); \
cmask = __ptr[this_unit]; \
cmask >>= (__start % ts); \
cmask &= mask; \
__v = _bt_piecewise_lshift(__v, ts - (__start % ts)); \
__v |= _bt_unsigned_cast(typeof(__v), cmask); \
} else { \
__v = _bt_piecewise_lshift(__v, ts); \
__v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
} \
*__vptr = __v; \
} while (0)
#define _bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
do { \
typeof(*(_vptr)) *__vptr = (_vptr); \
typeof(*__vptr) __v; \
type *__ptr = (void *) (_ptr); \
unsigned long __start = (_start), __length = (_length); \
type mask, cmask; \
unsigned long ts = sizeof(type) * CHAR_BIT; /* type size */ \
unsigned long start_unit, end_unit, this_unit; \
unsigned long end, cshift; /* cshift is "complement shift" */ \
\
if (!__length) { \
*__vptr = 0; \
break; \
} \
\
end = __start + __length; \
start_unit = __start / ts; \
end_unit = (end + (ts - 1)) / ts; \
\
this_unit = start_unit; \
if (_bt_is_signed_type(typeof(__v)) \
&& (__ptr[this_unit] & ((type) 1 << (ts - (__start % ts) - 1)))) \
__v = ~(typeof(__v)) 0; \
else \
__v = 0; \
if (start_unit == end_unit - 1) { \
cmask = __ptr[this_unit]; \
cmask >>= (ts - (end % ts)) % ts; \
if ((end - __start) % ts) { \
mask = ~((~(type) 0) << (end - __start)); \
cmask &= mask; \
} \
__v = _bt_piecewise_lshift(__v, end - __start); \
__v |= _bt_unsigned_cast(typeof(__v), cmask); \
*__vptr = __v; \
break; \
} \
if (__start % ts) { \
cshift = __start % ts; \
mask = ~((~(type) 0) << (ts - cshift)); \
cmask = __ptr[this_unit]; \
cmask &= mask; \
__v = _bt_piecewise_lshift(__v, ts - cshift); \
__v |= _bt_unsigned_cast(typeof(__v), cmask); \
__start += ts - cshift; \
this_unit++; \
} \
for (; this_unit < end_unit - 1; this_unit++) { \
__v = _bt_piecewise_lshift(__v, ts); \
__v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
__start += ts; \
} \
if (end % ts) { \
mask = ~((~(type) 0) << (end % ts)); \
cmask = __ptr[this_unit]; \
cmask >>= ts - (end % ts); \
cmask &= mask; \
__v = _bt_piecewise_lshift(__v, end % ts); \
__v |= _bt_unsigned_cast(typeof(__v), cmask); \
} else { \
__v = _bt_piecewise_lshift(__v, ts); \
__v |= _bt_unsigned_cast(typeof(__v), __ptr[this_unit]);\
} \
*__vptr = __v; \
} while (0)
/*
* bt_bitfield_read - read integer from a bitfield in native endianness
* bt_bitfield_read_le - read integer from a bitfield in little endian
* bt_bitfield_read_be - read integer from a bitfield in big endian
*/
#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
_bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
_bt_bitfield_read_le(_ptr, type, _start, _length, _vptr)
#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
_bt_bitfield_read_be(_ptr, unsigned char, _start, _length, _vptr)
#elif (__BYTE_ORDER == __BIG_ENDIAN)
#define bt_bitfield_read(_ptr, type, _start, _length, _vptr) \
_bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
#define bt_bitfield_read_le(_ptr, type, _start, _length, _vptr) \
_bt_bitfield_read_le(_ptr, unsigned char, _start, _length, _vptr)
#define bt_bitfield_read_be(_ptr, type, _start, _length, _vptr) \
_bt_bitfield_read_be(_ptr, type, _start, _length, _vptr)
#else /* (__BYTE_ORDER == __PDP_ENDIAN) */
#error "Byte order not supported"
#endif
#endif /* _BABELTRACE_BITFIELD_H */
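A minimal usage sketch for the bitfield accessors (illustrative only; bitfield_example is not part of the removed tree):

static unsigned int bitfield_example(void)
{
        unsigned char buf[4] = { 0 };
        unsigned int v;

        /* Write the value 0x5 into a 3-bit field starting at bit offset 4
         * of the byte array, then read it back; v ends up equal to 0x5. */
        bt_bitfield_write(buf, unsigned char, 4, 3, 0x5);
        bt_bitfield_read(buf, unsigned char, 4, 3, &v);
        return v;
}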

View file

@@ -1,29 +0,0 @@
#ifndef _LTTNG_BUG_H
#define _LTTNG_BUG_H
/*
* lib/bug.h
*
* (C) Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
/**
* BUILD_RUNTIME_BUG_ON - check condition at build (if constant) or runtime
* @condition: the condition which should be false.
*
* If the condition is a constant and true, the compiler will generate a build
* error. If the condition is not constant, a BUG will be triggered at runtime
* if the condition is ever true. If the condition is constant and false, no
* code is emitted.
*/
#define BUILD_RUNTIME_BUG_ON(condition) \
do { \
if (__builtin_constant_p(condition)) \
BUILD_BUG_ON(condition); \
else \
BUG_ON(condition); \
} while (0)
#endif

View file

@@ -1,207 +0,0 @@
/*
* lttng_prio_heap.c
*
* Priority heap containing pointers. Based on CLRS, chapter 6.
*
* Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
#include <linux/slab.h>
#include "lttng_prio_heap.h"
#ifdef DEBUG_HEAP
void lttng_check_heap(const struct lttng_ptr_heap *heap)
{
size_t i;
if (!heap->len)
return;
for (i = 1; i < heap->len; i++)
WARN_ON_ONCE(!heap->gt(heap->ptrs[i], heap->ptrs[0]));
}
#endif
static
size_t parent(size_t i)
{
return (i -1) >> 1;
}
static
size_t left(size_t i)
{
return (i << 1) + 1;
}
static
size_t right(size_t i)
{
return (i << 1) + 2;
}
/*
* Copy of heap->ptrs pointer is invalid after heap_grow.
*/
static
int heap_grow(struct lttng_ptr_heap *heap, size_t new_len)
{
void **new_ptrs;
if (heap->alloc_len >= new_len)
return 0;
heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
new_ptrs = kmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
if (!new_ptrs)
return -ENOMEM;
if (heap->ptrs)
memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
kfree(heap->ptrs);
heap->ptrs = new_ptrs;
return 0;
}
static
int heap_set_len(struct lttng_ptr_heap *heap, size_t new_len)
{
int ret;
ret = heap_grow(heap, new_len);
if (ret)
return ret;
heap->len = new_len;
return 0;
}
int lttng_heap_init(struct lttng_ptr_heap *heap, size_t alloc_len,
gfp_t gfpmask, int gt(void *a, void *b))
{
heap->ptrs = NULL;
heap->len = 0;
heap->alloc_len = 0;
heap->gt = gt;
heap->gfpmask = gfpmask;
/*
* Minimum size allocated is 1 entry to ensure memory allocation
* never fails within heap_replace_max.
*/
return heap_grow(heap, max_t(size_t, 1, alloc_len));
}
void lttng_heap_free(struct lttng_ptr_heap *heap)
{
kfree(heap->ptrs);
}
static void heapify(struct lttng_ptr_heap *heap, size_t i)
{
void **ptrs = heap->ptrs;
size_t l, r, largest;
for (;;) {
void *tmp;
l = left(i);
r = right(i);
if (l < heap->len && heap->gt(ptrs[l], ptrs[i]))
largest = l;
else
largest = i;
if (r < heap->len && heap->gt(ptrs[r], ptrs[largest]))
largest = r;
if (largest == i)
break;
tmp = ptrs[i];
ptrs[i] = ptrs[largest];
ptrs[largest] = tmp;
i = largest;
}
lttng_check_heap(heap);
}
void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p)
{
void *res;
if (!heap->len) {
(void) heap_set_len(heap, 1);
heap->ptrs[0] = p;
lttng_check_heap(heap);
return NULL;
}
/* Replace the current max and heapify */
res = heap->ptrs[0];
heap->ptrs[0] = p;
heapify(heap, 0);
return res;
}
int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p)
{
void **ptrs;
size_t pos;
int ret;
ret = heap_set_len(heap, heap->len + 1);
if (ret)
return ret;
ptrs = heap->ptrs;
pos = heap->len - 1;
while (pos > 0 && heap->gt(p, ptrs[parent(pos)])) {
/* Move parent down until we find the right spot */
ptrs[pos] = ptrs[parent(pos)];
pos = parent(pos);
}
ptrs[pos] = p;
lttng_check_heap(heap);
return 0;
}
void *lttng_heap_remove(struct lttng_ptr_heap *heap)
{
switch (heap->len) {
case 0:
return NULL;
case 1:
(void) heap_set_len(heap, 0);
return heap->ptrs[0];
}
/* Shrink, replace the current max by previous last entry and heapify */
heap_set_len(heap, heap->len - 1);
/* len changed. previous last entry is at heap->len */
return lttng_heap_replace_max(heap, heap->ptrs[heap->len]);
}
void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p)
{
size_t pos, len = heap->len;
for (pos = 0; pos < len; pos++)
if (heap->ptrs[pos] == p)
goto found;
return NULL;
found:
if (heap->len == 1) {
(void) heap_set_len(heap, 0);
lttng_check_heap(heap);
return heap->ptrs[0];
}
/* Replace p with previous last entry and heapify. */
heap_set_len(heap, heap->len - 1);
/* len changed. previous last entry is at heap->len */
heap->ptrs[pos] = heap->ptrs[heap->len];
heapify(heap, pos);
return p;
}

View file

@ -1,117 +0,0 @@
#ifndef _LTTNG_PRIO_HEAP_H
#define _LTTNG_PRIO_HEAP_H
/*
* lttng_prio_heap.h
*
* Priority heap containing pointers. Based on CLRS, chapter 6.
*
* Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*/
#include <linux/gfp.h>
struct lttng_ptr_heap {
size_t len, alloc_len;
void **ptrs;
int (*gt)(void *a, void *b);
gfp_t gfpmask;
};
#ifdef DEBUG_HEAP
void lttng_check_heap(const struct lttng_ptr_heap *heap);
#else
static inline
void lttng_check_heap(const struct lttng_ptr_heap *heap)
{
}
#endif
/**
* lttng_heap_maximum - return the largest element in the heap
* @heap: the heap to be operated on
*
* Returns the largest element in the heap, without performing any modification
* to the heap structure. Returns NULL if the heap is empty.
*/
static inline void *lttng_heap_maximum(const struct lttng_ptr_heap *heap)
{
lttng_check_heap(heap);
return heap->len ? heap->ptrs[0] : NULL;
}
/**
* lttng_heap_init - initialize the heap
* @heap: the heap to initialize
* @alloc_len: number of elements initially allocated
* @gfp: allocation flags
* @gt: function to compare the elements
*
* Returns -ENOMEM if out of memory.
*/
extern int lttng_heap_init(struct lttng_ptr_heap *heap,
size_t alloc_len, gfp_t gfpmask,
int gt(void *a, void *b));
/**
* lttng_heap_free - free the heap
* @heap: the heap to free
*/
extern void lttng_heap_free(struct lttng_ptr_heap *heap);
/**
* lttng_heap_insert - insert an element into the heap
* @heap: the heap to be operated on
* @p: the element to add
*
* Insert an element into the heap.
*
* Returns -ENOMEM if out of memory.
*/
extern int lttng_heap_insert(struct lttng_ptr_heap *heap, void *p);
/**
* lttng_heap_remove - remove the largest element from the heap
* @heap: the heap to be operated on
*
* Returns the largest element in the heap. It removes this element from the
* heap. Returns NULL if the heap is empty.
*/
extern void *lttng_heap_remove(struct lttng_ptr_heap *heap);
/**
* lttng_heap_cherrypick - remove a given element from the heap
* @heap: the heap to be operated on
* @p: the element
*
* Remove the given element from the heap. Return the element if present, else
* return NULL. This algorithm has a complexity of O(n), which is higher than
* O(log(n)) provided by the rest of this API.
*/
extern void *lttng_heap_cherrypick(struct lttng_ptr_heap *heap, void *p);
/**
* lttng_heap_replace_max - replace the largest element of the heap
* @heap: the heap to be operated on
* @p: the pointer to be inserted as topmost element replacement
*
* Returns the largest element in the heap. It removes this element from the
* heap. The heap is rebalanced only once after the insertion. Returns NULL if
* the heap is empty.
*
* This is the equivalent of calling heap_remove() and then heap_insert(), but
* it only rebalances the heap once. It never allocates memory.
*/
extern void *lttng_heap_replace_max(struct lttng_ptr_heap *heap, void *p);
#endif /* _LTTNG_PRIO_HEAP_H */
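A short sketch of how this heap API is meant to be driven (the comparison callback, element type and heap_demo() wrapper are hypothetical; error handling is kept minimal):

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>
#include "lttng_prio_heap.h"

/* Hypothetical ordering callback: compare the u64 values behind the pointers. */
static int u64_gt(void *a, void *b)
{
	return *(u64 *)a > *(u64 *)b;
}

static void heap_demo(void)
{
	struct lttng_ptr_heap heap;
	u64 values[] = { 7, 42, 13 };
	void *max;
	size_t i;

	if (lttng_heap_init(&heap, 4, GFP_KERNEL, u64_gt))
		return;				/* -ENOMEM */
	for (i = 0; i < ARRAY_SIZE(values); i++)
		(void) lttng_heap_insert(&heap, &values[i]);
	max = lttng_heap_maximum(&heap);	/* peeks at &values[1] (42) */
	max = lttng_heap_remove(&heap);		/* removes 42; 13 becomes the new maximum */
	(void) max;
	lttng_heap_free(&heap);
}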

View file

@ -1,25 +0,0 @@
#ifndef _LINUX_RING_BUFFER_API_H
#define _LINUX_RING_BUFFER_API_H
/*
* linux/ringbuffer/api.h
*
* Copyright (C) 2010 - Mathieu Desnoyers "mathieu.desnoyers@efficios.com"
*
* Ring Buffer API.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
#include "../../wrapper/ringbuffer/vfs.h"
/*
* ring_buffer_frontend_api.h contains static inline functions that depend on
* client static inlines. Hence the inclusion of this "api" header only
* within the client.
*/
#include "../../wrapper/ringbuffer/frontend_api.h"
#endif /* _LINUX_RING_BUFFER_API_H */

View file

@ -1,250 +0,0 @@
#ifndef _LINUX_RING_BUFFER_BACKEND_H
#define _LINUX_RING_BUFFER_BACKEND_H
/*
* linux/ringbuffer/backend.h
*
* Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring buffer backend (API).
*
* Dual LGPL v2.1/GPL v2 license.
*
* Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
* the reader in flight recorder mode.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mm.h>
/* Internal helpers */
#include "../../wrapper/ringbuffer/backend_internal.h"
#include "../../wrapper/ringbuffer/frontend_internal.h"
/* Ring buffer backend API */
/* Ring buffer backend access (read/write) */
extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
size_t offset, void __user *dest,
size_t len);
extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
size_t offset, void *dest, size_t len);
extern struct page **
lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
void ***virt);
/*
* Return the address where a given offset is located.
* Should be used to get the current subbuffer header pointer. Given we know
* it's never on a page boundary, it's safe to write directly to this address,
* as long as the write is never bigger than a page size.
*/
extern void *
lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
size_t offset);
extern void *
lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
size_t offset);
/**
* lib_ring_buffer_write - write data to a buffer backend
* @config : ring buffer instance configuration
* @ctx: ring buffer context. (input arguments only)
* @src : source pointer to copy from
* @len : length of data to copy
*
* This function copies "len" bytes of data from a source pointer to a buffer
* backend, at the current context offset. This is more or less a buffer
* backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
* if the copy crosses a page boundary.
*/
static inline
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
const void *src, size_t len)
{
struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
struct channel_backend *chanb = &ctx->chan->backend;
size_t sbidx, index;
size_t offset = ctx->buf_offset;
ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(ctx->chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
if (likely(pagecpy == len))
lib_ring_buffer_do_copy(config,
rpages->p[index].virt
+ (offset & ~PAGE_MASK),
src, len);
else
_lib_ring_buffer_write(bufb, offset, src, len, 0);
ctx->buf_offset += len;
}
/**
* lib_ring_buffer_memset - write len bytes of c to a buffer backend
* @config : ring buffer instance configuration
* @bufb : ring buffer backend
* @offset : offset within the buffer
* @c : the byte to copy
* @len : number of bytes to copy
*
* This function writes "len" bytes of "c" to a buffer backend, at a specific
* offset. This is more or less a buffer backend-specific memset() operation.
* Calls the slow path (_ring_buffer_memset) if the write crosses a page
* boundary.
*/
static inline
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
struct channel_backend *chanb = &ctx->chan->backend;
size_t sbidx, index;
size_t offset = ctx->buf_offset;
ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(ctx->chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
if (likely(pagecpy == len))
lib_ring_buffer_do_memset(rpages->p[index].virt
+ (offset & ~PAGE_MASK),
c, len);
else
_lib_ring_buffer_memset(bufb, offset, c, len, 0);
ctx->buf_offset += len;
}
/**
* lib_ring_buffer_copy_from_user - write userspace data to a buffer backend
* @config : ring buffer instance configuration
* @ctx: ring buffer context. (input arguments only)
* @src : userspace source pointer to copy from
* @len : length of data to copy
*
* This function copies "len" bytes of data from a userspace pointer to a
* buffer backend, at the current context offset. This is more or less a buffer
* backend-specific memcpy() operation. Calls the slow path
* (_ring_buffer_write_from_user) if the copy crosses a page boundary.
*/
static inline
void lib_ring_buffer_copy_from_user(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
{
struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
struct channel_backend *chanb = &ctx->chan->backend;
size_t sbidx, index;
size_t offset = ctx->buf_offset;
ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
unsigned long ret;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(ctx->chan,
config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
if (unlikely(!access_ok(VERIFY_READ, src, len)))
goto fill_buffer;
if (likely(pagecpy == len)) {
ret = lib_ring_buffer_do_copy_from_user(
rpages->p[index].virt + (offset & ~PAGE_MASK),
src, len);
if (unlikely(ret > 0)) {
len -= (pagecpy - ret);
offset += (pagecpy - ret);
goto fill_buffer;
}
} else {
_lib_ring_buffer_copy_from_user(bufb, offset, src, len, 0);
}
ctx->buf_offset += len;
return;
fill_buffer:
/*
* In the error path we call the slow path version to avoid
* the pollution of static inline code.
*/
_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}
/*
* This accessor counts the number of unread records in a buffer.
* It only provides a consistent value if no reads or writes are performed
* concurrently.
*/
static inline
unsigned long lib_ring_buffer_get_records_unread(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
struct lib_ring_buffer_backend_pages *pages;
unsigned long records_unread = 0, sb_bindex, id;
unsigned int i;
for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
id = bufb->buf_wsb[i].id;
sb_bindex = subbuffer_id_get_index(config, id);
pages = bufb->array[sb_bindex];
records_unread += v_read(config, &pages->records_unread);
}
if (config->mode == RING_BUFFER_OVERWRITE) {
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
pages = bufb->array[sb_bindex];
records_unread += v_read(config, &pages->records_unread);
}
return records_unread;
}
ssize_t lib_ring_buffer_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin);
#endif /* _LINUX_RING_BUFFER_BACKEND_H */

View file

@ -1,449 +0,0 @@
#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
/*
* linux/ringbuffer/backend_internal.h
*
* Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring buffer backend (internal helpers).
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/ringbuffer/frontend_types.h"
#include <linux/string.h>
#include <linux/uaccess.h>
/* Ring buffer backend API presented to the frontend */
/* Ring buffer and channel backend create/free */
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
struct channel_backend *chan, int cpu);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
const char *name,
const struct lib_ring_buffer_config *config,
void *priv, size_t subbuf_size,
size_t num_subbuf);
void channel_backend_free(struct channel_backend *chanb);
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
void channel_backend_reset(struct channel_backend *chanb);
int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);
extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
size_t offset, const void *src, size_t len,
ssize_t pagecpy);
extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
size_t offset, int c, size_t len,
ssize_t pagecpy);
extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
size_t offset, const void *src,
size_t len, ssize_t pagecpy);
/*
* Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
* exchanged atomically.
*
* The top half of the word, except its lowest bit, belongs to "offset", which is
* used to count the produced buffers. For overwrite mode, this provides the
* consumer with the capacity to read subbuffers in order, handling the
* situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
* systems) concurrently with a single execution of get_subbuf (between offset
* sampling and subbuffer ID exchange).
*/
#define HALF_ULONG_BITS (BITS_PER_LONG >> 1)
#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK (~(SB_ID_OFFSET_COUNT - 1))
/*
* The lowest bit of the top half of the word belongs to noref. Used only for overwrite mode.
*/
#define SB_ID_NOREF_SHIFT (SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT (1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK SB_ID_NOREF_COUNT
/*
* In overwrite mode: lowest half of word is used for index.
* Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
* In producer-consumer mode: whole word used for index.
*/
#define SB_ID_INDEX_SHIFT 0
#define SB_ID_INDEX_COUNT (1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK (SB_ID_NOREF_COUNT - 1)
/*
* Construct the subbuffer id from offset, index and noref. Use only the index
* for producer-consumer mode (offset and noref are only used in overwrite
* mode).
*/
static inline
unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
unsigned long offset, unsigned long noref,
unsigned long index)
{
if (config->mode == RING_BUFFER_OVERWRITE)
return (offset << SB_ID_OFFSET_SHIFT)
| (noref << SB_ID_NOREF_SHIFT)
| index;
else
return index;
}
/*
* Compare offset with the offset contained within id. Return 1 if the offset
* bits are identical, else 0.
*/
static inline
int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
unsigned long id, unsigned long offset)
{
return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}
static inline
unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
return id & SB_ID_INDEX_MASK;
else
return id;
}
static inline
unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
unsigned long id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
return !!(id & SB_ID_NOREF_MASK);
else
return 1;
}
/*
* Only used by reader on subbuffer ID it has exclusive access to. No volatile
* needed.
*/
static inline
void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
*id |= SB_ID_NOREF_MASK;
}
static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
unsigned long *id, unsigned long offset)
{
unsigned long tmp;
if (config->mode == RING_BUFFER_OVERWRITE) {
tmp = *id;
tmp &= ~SB_ID_OFFSET_MASK;
tmp |= offset << SB_ID_OFFSET_SHIFT;
tmp |= SB_ID_NOREF_MASK;
/* Volatile store, read concurrently by readers. */
ACCESS_ONCE(*id) = tmp;
}
}
/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
unsigned long *id)
{
if (config->mode == RING_BUFFER_OVERWRITE)
*id &= ~SB_ID_NOREF_MASK;
}
/*
* For overwrite mode, cap the number of subbuffers per buffer to:
* 2^16 on 32-bit architectures
* 2^32 on 64-bit architectures
* This is required to fit in the index part of the ID. Return 0 on success,
* -EPERM on failure.
*/
static inline
int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
unsigned long num_subbuf)
{
if (config->mode == RING_BUFFER_OVERWRITE)
return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
else
return 0;
}
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx)
{
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
v_inc(config, &bufb->array[sb_bindex]->records_commit);
}
/*
* Reader has exclusive subbuffer access for record consumption. No need to
* perform the decrement atomically.
*/
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb)
{
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
CHAN_WARN_ON(bufb->chan,
!v_read(config, &bufb->array[sb_bindex]->records_unread));
/* Non-atomic decrement protected by exclusive subbuffer access */
_v_dec(config, &bufb->array[sb_bindex]->records_unread);
v_inc(config, &bufb->records_read);
}
static inline
unsigned long subbuffer_get_records_count(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx)
{
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
return v_read(config, &bufb->array[sb_bindex]->records_commit);
}
/*
* Must be executed at subbuffer delivery when the writer has _exclusive_
* subbuffer access. See ring_buffer_check_deliver() for details.
* ring_buffer_get_records_count() must be called to get the records count
* before this function, because it resets the records_commit count.
*/
static inline
unsigned long subbuffer_count_records_overrun(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx)
{
struct lib_ring_buffer_backend_pages *pages;
unsigned long overruns, sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
pages = bufb->array[sb_bindex];
overruns = v_read(config, &pages->records_unread);
v_set(config, &pages->records_unread,
v_read(config, &pages->records_commit));
v_set(config, &pages->records_commit, 0);
return overruns;
}
static inline
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx,
unsigned long data_size)
{
struct lib_ring_buffer_backend_pages *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
pages = bufb->array[sb_bindex];
pages->data_size = data_size;
}
static inline
unsigned long subbuffer_get_read_data_size(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb)
{
struct lib_ring_buffer_backend_pages *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
pages = bufb->array[sb_bindex];
return pages->data_size;
}
static inline
unsigned long subbuffer_get_data_size(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx)
{
struct lib_ring_buffer_backend_pages *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
pages = bufb->array[sb_bindex];
return pages->data_size;
}
/**
* lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
* writer.
*/
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx)
{
unsigned long id, new_id;
if (config->mode != RING_BUFFER_OVERWRITE)
return;
/*
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
for (;;) {
/* This check is called on the fast path for each record. */
if (likely(!subbuffer_id_is_noref(config, id))) {
/*
* Store after load dependency ordering the writes to
* the subbuffer after load and test of the noref flag
* matches the memory barrier implied by the cmpxchg()
* in update_read_sb_index().
*/
return; /* Already writing to this buffer */
}
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
if (likely(new_id == id))
break;
id = new_id;
}
}
/**
* lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
* called by writer.
*/
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx, unsigned long offset)
{
if (config->mode != RING_BUFFER_OVERWRITE)
return;
/*
* Because ring_buffer_set_noref() is only called by a single thread
* (the one which updated the cc_sb value), there are no concurrent
* updates to take care of: other writers have not updated cc_sb, so
* they cannot set the noref flag, and concurrent readers cannot modify
* the pointer because the noref flag is not set yet.
* The smp_wmb() in ring_buffer_commit() takes care of ordering writes
* to the subbuffer before this set noref operation.
* subbuffer_set_noref() uses a volatile store to deal with concurrent
* readers of the noref flag.
*/
CHAN_WARN_ON(bufb->chan,
subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
/*
* Memory barrier that ensures counter stores are ordered before set
* noref and offset.
*/
smp_mb();
subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
}
/**
* update_read_sb_index - Read-side subbuffer index update.
*/
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
struct channel_backend *chanb,
unsigned long consumed_idx,
unsigned long consumed_count)
{
unsigned long old_id, new_id;
if (config->mode == RING_BUFFER_OVERWRITE) {
/*
* Exchange the target writer subbuffer with our own unused
* subbuffer. No need to use ACCESS_ONCE() here to read the
* old_wpage, because the value read will be confirmed by the
* following cmpxchg().
*/
old_id = bufb->buf_wsb[consumed_idx].id;
if (unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
* Make sure the offset count we are expecting matches the one
* indicated by the writer.
*/
if (unlikely(!subbuffer_id_compare_offset(config, old_id,
consumed_count)))
return -EAGAIN;
CHAN_WARN_ON(bufb->chan,
!subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
consumed_count);
new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
bufb->buf_rsb.id);
if (unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
/* No page exchange, use the writer page directly */
bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
}
return 0;
}
/*
* Use the architecture-specific memcpy implementation for constant-sized
* inputs, but rely on an inline memcpy when the length is not statically known.
* A function call to memcpy is just way too expensive for a fast path.
*/
#define lib_ring_buffer_do_copy(config, dest, src, len) \
do { \
size_t __len = (len); \
if (__builtin_constant_p(len)) \
memcpy(dest, src, __len); \
else \
inline_memcpy(dest, src, __len); \
} while (0)
/*
* We use __copy_from_user to copy userspace data since we already
* did the access_ok for the whole range.
*/
static inline
unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
const void __user *src,
unsigned long len)
{
return __copy_from_user(dest, src, len);
}
/*
* write len bytes to dest with c
*/
static inline
void lib_ring_buffer_do_memset(char *dest, int c,
unsigned long len)
{
unsigned long i;
for (i = 0; i < len; i++)
dest[i] = c;
}
#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
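To make the subbuffer ID bit layout described above concrete, here is a small sketch for a 32-bit kernel (BITS_PER_LONG == 32, so HALF_ULONG_BITS == 16); sb_id_demo() and the chosen values are purely illustrative and assume an overwrite-mode configuration:

/*
 * 32-bit layout:
 *   bits 17..31  offset  (produced-buffer counter, 15 bits)
 *   bit  16      noref
 *   bits  0..15  index   (subbuffer index, 16 bits)
 */
static void sb_id_demo(const struct lib_ring_buffer_config *config)
{
	/* Assumes config->mode == RING_BUFFER_OVERWRITE. */
	unsigned long id = subbuffer_id(config, 5, 1, 3);

	/* id == (5UL << 17) | (1UL << 16) | 3 == 0x000b0003 */
	BUG_ON(subbuffer_id_get_index(config, id) != 3);	/* index bits  */
	BUG_ON(!subbuffer_id_is_noref(config, id));		/* noref bit   */
	BUG_ON(!subbuffer_id_compare_offset(config, id, 5));	/* offset bits */
}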

View file

@ -1,80 +0,0 @@
#ifndef _LINUX_RING_BUFFER_BACKEND_TYPES_H
#define _LINUX_RING_BUFFER_BACKEND_TYPES_H
/*
* linux/ringbuffer/backend_types.h
*
* Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring buffer backend (types).
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/cpumask.h>
#include <linux/types.h>
struct lib_ring_buffer_backend_page {
void *virt; /* page virtual address (cached) */
struct page *page; /* pointer to page structure */
};
struct lib_ring_buffer_backend_pages {
unsigned long mmap_offset; /* offset of the subbuffer in mmap */
union v_atomic records_commit; /* current records committed count */
union v_atomic records_unread; /* records to read */
unsigned long data_size; /* Amount of data to read from subbuf */
struct lib_ring_buffer_backend_page p[];
};
struct lib_ring_buffer_backend_subbuffer {
/* Identifier for subbuf backend pages. Exchanged atomically. */
unsigned long id; /* backend subbuffer identifier */
};
/*
* Forward declaration of frontend-specific channel and ring_buffer.
*/
struct channel;
struct lib_ring_buffer;
struct lib_ring_buffer_backend {
/* Array of ring_buffer_backend_subbuffer for writer */
struct lib_ring_buffer_backend_subbuffer *buf_wsb;
/* ring_buffer_backend_subbuffer for reader */
struct lib_ring_buffer_backend_subbuffer buf_rsb;
/*
* Pointer array of backend pages, for whole buffer.
* Indexed by ring_buffer_backend_subbuffer identifier (id) index.
*/
struct lib_ring_buffer_backend_pages **array;
unsigned int num_pages_per_subbuf;
struct channel *chan; /* Associated channel */
int cpu; /* This buffer's cpu. -1 if global. */
union v_atomic records_read; /* Number of records read */
uint allocated:1; /* is buffer allocated ? */
};
struct channel_backend {
unsigned long buf_size; /* Size of the buffer */
unsigned long subbuf_size; /* Sub-buffer size */
unsigned int subbuf_size_order; /* Order of sub-buffer size */
unsigned int num_subbuf_order; /*
* Order of number of sub-buffers/buffer
* for writer.
*/
unsigned int buf_size_order; /* Order of buffer size */
uint extra_reader_sb:1; /* has extra reader subbuffer ? */
struct lib_ring_buffer *buf; /* Channel per-cpu buffers */
unsigned long num_subbuf; /* Number of sub-buffers for writer */
u64 start_tsc; /* Channel creation TSC value */
void *priv; /* Client-specific information */
struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
const struct lib_ring_buffer_config *config; /* Ring buffer configuration */
cpumask_var_t cpumask; /* Allocated per-cpu buffers cpumask */
char name[NAME_MAX]; /* Channel name */
};
#endif /* _LINUX_RING_BUFFER_BACKEND_TYPES_H */

View file

@ -1,298 +0,0 @@
#ifndef _LINUX_RING_BUFFER_CONFIG_H
#define _LINUX_RING_BUFFER_CONFIG_H
/*
* linux/ringbuffer/config.h
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring buffer configuration header. Note: after declaring the standard inline
* functions, clients should also include linux/ringbuffer/api.h.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/types.h>
#include <linux/percpu.h>
#include "../align.h"
struct lib_ring_buffer;
struct channel;
struct lib_ring_buffer_config;
struct lib_ring_buffer_ctx;
/*
* Ring buffer client callbacks. Only used by slow path, never on fast path.
* For the fast path, record_header_size() and ring_buffer_clock_read() should be
* provided as inline functions too. These may simply return 0 if not used by
* the client.
*/
struct lib_ring_buffer_client_cb {
/* Mandatory callbacks */
/* A static inline version is also required for fast path */
u64 (*ring_buffer_clock_read) (struct channel *chan);
size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx);
void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size);
/* Optional callbacks (can be set to NULL) */
/* Called at buffer creation/finalize */
int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
int cpu, const char *name);
/*
* Clients should guarantee that no new reader handle can be opened
* after finalize.
*/
void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);
/*
* Extract header length, payload length and timestamp from event
* record. Used by buffer iterators. Timestamp is only used by channel
* iterator.
*/
void (*record_get) (const struct lib_ring_buffer_config *config,
struct channel *chan, struct lib_ring_buffer *buf,
size_t offset, size_t *header_len,
size_t *payload_len, u64 *timestamp);
};
/*
* Ring buffer instance configuration.
*
* Declare as "static const" within the client object to ensure the inline fast
* paths can be optimized.
*
* alloc/sync pairs:
*
* RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
* Per-cpu buffers with per-cpu synchronization. Tracing must be performed
* with preemption disabled (lib_ring_buffer_get_cpu() and
* lib_ring_buffer_put_cpu()).
*
* RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
* Per-cpu buffer with global synchronization. Tracing can be performed with
* preemption enabled, statistically stays on the local buffers.
*
* RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
* Should only be used for buffers belonging to a single thread or protected
* by mutual exclusion by the client. Note that the periodic sub-buffer switch
* should be disabled in this kind of configuration.
*
* RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
* Global shared buffer with global synchronization.
*
* wakeup:
*
* RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
* buffers and wake up readers if data is ready. Mainly useful for tracers which
* don't want to call into the wakeup code on the tracing path. Use in
* combination with "read_timer_interval" channel_create() argument.
*
* RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
* ready to read. Lower latencies before the reader is woken up. Mainly suitable
* for drivers.
*
* RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
* has the responsibility to perform wakeups.
*/
struct lib_ring_buffer_config {
enum {
RING_BUFFER_ALLOC_PER_CPU,
RING_BUFFER_ALLOC_GLOBAL,
} alloc;
enum {
RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
} sync;
enum {
RING_BUFFER_OVERWRITE, /* Overwrite when buffer full */
RING_BUFFER_DISCARD, /* Discard when buffer full */
} mode;
enum {
RING_BUFFER_SPLICE,
RING_BUFFER_MMAP,
RING_BUFFER_READ, /* TODO */
RING_BUFFER_ITERATOR,
RING_BUFFER_NONE,
} output;
enum {
RING_BUFFER_PAGE,
RING_BUFFER_VMAP, /* TODO */
RING_BUFFER_STATIC, /* TODO */
} backend;
enum {
RING_BUFFER_NO_OOPS_CONSISTENCY,
RING_BUFFER_OOPS_CONSISTENCY,
} oops;
enum {
RING_BUFFER_IPI_BARRIER,
RING_BUFFER_NO_IPI_BARRIER,
} ipi;
enum {
RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
RING_BUFFER_WAKEUP_BY_WRITER, /*
* writer wakes up reader,
* not lock-free
* (takes spinlock).
*/
} wakeup;
/*
* tsc_bits: timestamp bits saved at each record.
* 0 and 64 disable the timestamp compression scheme.
*/
unsigned int tsc_bits;
struct lib_ring_buffer_client_cb cb;
};
/*
* ring buffer context
*
* Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
* lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
* lib_ring_buffer_write().
*/
struct lib_ring_buffer_ctx {
/* input received by lib_ring_buffer_reserve(), saved here. */
struct channel *chan; /* channel */
void *priv; /* client private data */
size_t data_size; /* size of payload */
int largest_align; /*
* alignment of the largest element
* in the payload
*/
int cpu; /* processor id */
/* output from lib_ring_buffer_reserve() */
struct lib_ring_buffer *buf; /*
* buffer corresponding to processor id
* for this channel
*/
size_t slot_size; /* size of the reserved slot */
unsigned long buf_offset; /* offset following the record header */
unsigned long pre_offset; /*
* Initial offset position _before_
* the record is written. Positioned
* prior to record header alignment
* padding.
*/
u64 tsc; /* time-stamp counter value */
unsigned int rflags; /* reservation flags */
};
/**
* lib_ring_buffer_ctx_init - initialize ring buffer context
* @ctx: ring buffer context to initialize
* @chan: channel
* @priv: client private data
* @data_size: size of record data payload
* @largest_align: largest alignment within data payload types
* @cpu: processor id
*/
static inline
void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
struct channel *chan, void *priv,
size_t data_size, int largest_align,
int cpu)
{
ctx->chan = chan;
ctx->priv = priv;
ctx->data_size = data_size;
ctx->largest_align = largest_align;
ctx->cpu = cpu;
ctx->rflags = 0;
}
/*
* Reservation flags.
*
* RING_BUFFER_RFLAG_FULL_TSC
*
* This flag is passed to record_header_size() and to the primitive used to
* write the record header. It indicates that the full 64-bit time value is
* needed in the record header. If this flag is not set, the record header only
* needs to contain "tsc_bits" bits of the time value.
*
* Reservation flags can be added by the client, starting from
* "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
* record_header_size() to lib_ring_buffer_write_record_header().
*/
#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
#define RING_BUFFER_RFLAG_END (1U << 1)
/*
* We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
* compile-time. We have to duplicate the "config->align" information and the
* definition here because config->align is used both in the slow and fast
* paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
*/
#ifdef RING_BUFFER_ALIGN
# define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
/*
* Calculate the offset needed to align the type.
* size_of_type must be non-zero.
*/
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
return offset_align(align_drift, size_of_type);
}
#else
# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
/*
* Calculate the offset needed to align the type.
* size_of_type must be non-zero.
*/
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
return 0;
}
#endif
/**
* lib_ring_buffer_align_ctx - Align context offset on "alignment"
* @ctx: ring buffer context.
*/
static inline
void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
size_t alignment)
{
ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
alignment);
}
/*
* lib_ring_buffer_check_config() returns 0 on success.
* Used internally to check for valid configurations at channel creation.
*/
static inline
int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
&& config->sync == RING_BUFFER_SYNC_PER_CPU
&& switch_timer_interval)
return -EINVAL;
return 0;
}
#include "../../wrapper/ringbuffer/vatomic.h"
#endif /* _LINUX_RING_BUFFER_CONFIG_H */
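As a rough sketch of the kind of "static const" declaration the comment above recommends, assuming a client with per-cpu discard-mode buffers and splice output; the client_* callback names are hypothetical placeholders, not the actual LTTng client callbacks:

static const struct lib_ring_buffer_config client_config = {
	.cb = {
		.ring_buffer_clock_read = client_clock_read,		/* hypothetical */
		.record_header_size = client_record_header_size,	/* hypothetical */
		.subbuffer_header_size = client_packet_header_size,	/* hypothetical */
		.buffer_begin = client_buffer_begin,			/* hypothetical */
		.buffer_end = client_buffer_end,			/* hypothetical */
	},
	.tsc_bits = 27,				/* 27-bit timestamp compression */
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_DISCARD,
	.output = RING_BUFFER_SPLICE,
	.backend = RING_BUFFER_PAGE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};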

View file

@ -1,228 +0,0 @@
#ifndef _LINUX_RING_BUFFER_FRONTEND_H
#define _LINUX_RING_BUFFER_FRONTEND_H
/*
* linux/ringbuffer/frontend.h
*
* (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring Buffer Library Synchronization Header (API).
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/pipe_fs_i.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/splice.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <asm/atomic.h>
#include <asm/local.h>
/* Internal helpers */
#include "../../wrapper/ringbuffer/frontend_internal.h"
/* Buffer creation/removal and setup operations */
/*
* switch_timer_interval is the time interval (in us) to fill sub-buffers with
* padding to let readers get those sub-buffers. Used for live streaming.
*
* read_timer_interval is the time interval (in us) to wake up pending readers.
*
* buf_addr is a pointer to the beginning of the preallocated contiguous buffer
* address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
* be set to NULL for other backends.
*/
extern
struct channel *channel_create(const struct lib_ring_buffer_config *config,
const char *name, void *priv,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval);
/*
* channel_destroy returns the private data pointer. It finalizes all channel's
* buffers, waits for readers to release all references, and destroys the
* channel.
*/
extern
void *channel_destroy(struct channel *chan);
/* Buffer read operations */
/*
* Iteration on channel cpumask needs to issue a read barrier to match the write
* barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
* buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
* only performed at channel destruction.
*/
#define for_each_channel_cpu(cpu, chan) \
for ((cpu) = -1; \
({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \
smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
extern struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
struct channel *chan, int cpu);
extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
/*
* Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
*/
extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
unsigned long *consumed,
unsigned long *produced);
extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
unsigned long consumed_new);
extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
unsigned long consumed);
extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
/*
* lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
* to read sub-buffers sequentially.
*/
static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
{
int ret;
ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
&buf->prod_snapshot);
if (ret)
return ret;
ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
return ret;
}
static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
{
lib_ring_buffer_put_subbuf(buf);
lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
buf->backend.chan));
}
extern void channel_reset(struct channel *chan);
extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
static inline
unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->offset);
}
static inline
unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return atomic_long_read(&buf->consumed);
}
/*
* Must call lib_ring_buffer_is_finalized before reading counters (memory
* ordering enforced with respect to trace teardown).
*/
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
int finalized = ACCESS_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
smp_rmb();
return finalized;
}
static inline
int lib_ring_buffer_channel_is_finalized(const struct channel *chan)
{
return chan->finalized;
}
static inline
int lib_ring_buffer_channel_is_disabled(const struct channel *chan)
{
return atomic_read(&chan->record_disabled);
}
static inline
unsigned long lib_ring_buffer_get_read_data_size(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return subbuffer_get_read_data_size(config, &buf->backend);
}
static inline
unsigned long lib_ring_buffer_get_records_count(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->records_count);
}
static inline
unsigned long lib_ring_buffer_get_records_overrun(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->records_overrun);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_full(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_full);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_wrap(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_wrap);
}
static inline
unsigned long lib_ring_buffer_get_records_lost_big(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->records_lost_big);
}
static inline
unsigned long lib_ring_buffer_get_records_read(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
return v_read(config, &buf->backend.records_read);
}
#endif /* _LINUX_RING_BUFFER_FRONTEND_H */
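A minimal read-side sketch built from the declarations above, assuming the caller already obtained the per-cpu buffer (e.g. via channel_get_ring_buffer()) and that dest is at least one sub-buffer large; drain_buffer() itself is a hypothetical helper:

/* Hypothetical consumer: copy every readable sub-buffer into dest. */
static void drain_buffer(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer *buf, void *dest)
{
	if (lib_ring_buffer_open_read(buf))
		return;		/* reader already attached, or buffer finalized */
	while (!lib_ring_buffer_get_next_subbuf(buf)) {
		unsigned long len;

		len = lib_ring_buffer_get_read_data_size(config, buf);
		lib_ring_buffer_read(&buf->backend, 0, dest, len);
		lib_ring_buffer_put_next_subbuf(buf);
	}
	lib_ring_buffer_release_read(buf);
}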

View file

@ -1,358 +0,0 @@
#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
#define _LINUX_RING_BUFFER_FRONTEND_API_H
/*
* linux/ringbuffer/frontend_api.h
*
* (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring Buffer Library Synchronization Header (buffer write API).
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
* See linux/ringbuffer/frontend.h for channel allocation and read-side API.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/frontend.h"
#include <linux/errno.h>
/**
* lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
*
* Disables preemption (acts as an RCU read-side critical section) and keeps a
* ring buffer nesting count as a supplementary safety net to ensure tracer client
* code will never trigger an endless recursion. Returns the processor ID on
* success, -EPERM on failure (nesting count too high).
*
* asm volatile and the "memory" clobber prevent the compiler from moving
* instructions out of the nesting count section. This is required to ensure
* that probe side-effects which can cause recursion (e.g. unforeseen traps,
* divisions by 0, ...) are triggered within the incremented nesting count
* section.
*/
static inline
int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
{
int cpu, nesting;
rcu_read_lock_sched_notrace();
cpu = smp_processor_id();
nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
barrier();
if (unlikely(nesting > 4)) {
WARN_ON_ONCE(1);
per_cpu(lib_ring_buffer_nesting, cpu)--;
rcu_read_unlock_sched_notrace();
return -EPERM;
} else
return cpu;
}
/**
* lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
*/
static inline
void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
{
barrier();
__get_cpu_var(lib_ring_buffer_nesting)--;
rcu_read_unlock_sched_notrace();
}
/*
* lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
* part of the API per se.
*
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
struct channel *chan = ctx->chan;
struct lib_ring_buffer *buf = ctx->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
ctx->tsc = lib_ring_buffer_clock_read(chan);
if ((int64_t) ctx->tsc == -EIO)
return 1;
/*
* Prefetch the cacheline for read, because we have to read the previous
* commit counter to increment it, and the commit seq value to compare it
* to the commit counter.
*/
prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
before_hdr_pad, ctx);
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
> chan->backend.subbuf_size))
return 1;
/*
* Record fits in the current buffer and we are not on a switch
* boundary. It's safe to write.
*/
*o_end = *o_begin + ctx->slot_size;
if (unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
*/
return 1;
return 0;
}
/**
* lib_ring_buffer_reserve - Reserve space in a ring buffer.
* @config: ring buffer instance configuration.
* @ctx: ring buffer context. (input and output) Must be already initialized.
*
* Atomic wait-free slot reservation. The reserved space starts at the context
* "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
*
* Return :
* 0 on success.
* -EAGAIN if channel is disabled.
* -ENOSPC if event size is too large for packet.
* -ENOBUFS if there is currently not enough space in buffer for the event.
* -EIO if data cannot be written into the buffer for any other reason.
*/
static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
struct lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
if (atomic_read(&chan->record_disabled))
return -EAGAIN;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
else
buf = chan->backend.buf;
if (atomic_read(&buf->record_disabled))
return -EAGAIN;
ctx->buf = buf;
/*
* Perform retryable operations.
*/
if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
!= o_old))
goto slow_path;
/*
* Atomically update last_tsc. This update races against concurrent
* atomic updates, but the race will always cause supplementary full TSC
* record headers, never the opposite (missing a full TSC record header
* when it would be needed).
*/
save_last_tsc(config, ctx->buf, ctx->tsc);
/*
* Push the reader if necessary
*/
lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
/*
* Clear noref flag for this subbuffer.
*/
lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
subbuf_index(o_end - 1, chan));
ctx->pre_offset = o_begin;
ctx->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
return lib_ring_buffer_reserve_slow(ctx);
}
/**
* lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
* @config: ring buffer instance configuration.
* @buf: buffer
* @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
*
* This operation is completely reentrant: it can be called while tracing is
* active with absolutely no lock held.
*
* Note, however, that as a v_cmpxchg is used for some atomic operations and
* requires to be executed locally for per-CPU buffers, this function must be
* called from the CPU which owns the buffer for an ACTIVE flush, with preemption
* disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
*/
static inline
void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf, enum switch_mode mode)
{
lib_ring_buffer_switch_slow(buf, mode);
}
/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
/**
* lib_ring_buffer_commit - Commit a record.
* @config: ring buffer instance configuration.
* @ctx: ring buffer context. (input arguments only)
*
* Atomic unordered slot commit. Increments the commit count in the
* specified sub-buffer, and delivers it if necessary.
*/
static inline
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
const struct lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
struct lib_ring_buffer *buf = ctx->buf;
unsigned long offset_end = ctx->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
/*
* Must count record before incrementing the commit count.
*/
subbuffer_count_record(config, &buf->backend, endidx);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
if (config->ipi == RING_BUFFER_IPI_BARRIER) {
/*
* Must write slot data before incrementing commit count. This
* compiler barrier is upgraded into a smp_mb() by the IPI sent
* by get_subbuf().
*/
barrier();
} else
smp_wmb();
v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
/*
* commit count read can race with concurrent OOO commit count updates.
* This is only needed for lib_ring_buffer_check_deliver (for
* non-polling delivery only) and for
* lib_ring_buffer_write_commit_counter. The race can only cause the
* counter to be read with the same value more than once, which could
* cause :
* - Multiple delivery for the same sub-buffer (which is handled
* gracefully by the reader code) if the value is for a full
* sub-buffer. It's important that we can never miss a sub-buffer
* delivery. Re-reading the value after the v_add ensures this.
* - Reading a commit_count with a higher value than what was actually
* added to it for the lib_ring_buffer_write_commit_counter call
* (again caused by a concurrent committer). It does not matter,
* because this function is interested in the fact that the commit
* count catches up with the reserve offset for a specific sub-buffer,
* which is completely independent of the order.
*/
commit_count = v_read(config, &buf->commit_hot[endidx].cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
commit_count, endidx);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
*/
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
ctx->buf_offset, commit_count,
ctx->slot_size);
}
/**
* lib_ring_buffer_try_discard_reserve - Try discarding a record.
* @config: ring buffer instance configuration.
* @ctx: ring buffer context. (input arguments only)
*
* Only succeeds if no other record has been written after the record to
* discard. If discard fails, the record must be committed to the buffer.
*
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
const struct lib_ring_buffer_ctx *ctx)
{
struct lib_ring_buffer *buf = ctx->buf;
unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
/*
* We need to ensure that if the cmpxchg succeeds and discards the
* record, the next record will record a full TSC, because it cannot
* rely on the last_tsc associated with the discarded record to detect
* overflows. The only way to ensure this is to set the last_tsc to 0
* (assuming no 64-bit TSC overflow), which forces a full 64-bit
* timestamp to be written in the next record.
*
* Note: if discard fails, we must leave the TSC in the record header.
* It is needed to keep track of TSC overflows for the following
* records.
*/
save_last_tsc(config, buf, 0ULL);
if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
!= end_offset))
return -EPERM;
else
return 0;
}
static inline
void channel_record_disable(const struct lib_ring_buffer_config *config,
struct channel *chan)
{
atomic_inc(&chan->record_disabled);
}
static inline
void channel_record_enable(const struct lib_ring_buffer_config *config,
struct channel *chan)
{
atomic_dec(&chan->record_disabled);
}
static inline
void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
atomic_inc(&buf->record_disabled);
}
static inline
void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
atomic_dec(&buf->record_disabled);
}
#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
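Putting the write-side pieces together, a minimal fast-path sketch for a client event; client_config refers to a configuration like the one sketched after config.h above, the u64 alignment choice and the omission of a record header write are simplifications, and error handling is reduced to the bare minimum:

/* Hypothetical client fast path: reserve space, write a payload, commit. */
static int client_write_event(struct channel *chan, const void *payload,
			      size_t len)
{
	struct lib_ring_buffer_ctx ctx;
	int cpu, ret;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len,
				 sizeof(u64) /* largest_align */, cpu);
	ret = lib_ring_buffer_reserve(&client_config, &ctx);
	if (ret)
		goto end;
	/* A real client would write its record header here first. */
	lib_ring_buffer_align_ctx(&ctx, sizeof(u64));
	lib_ring_buffer_write(&client_config, &ctx, payload, len);
	lib_ring_buffer_commit(&client_config, &ctx);
end:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}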

View file

@ -1,424 +0,0 @@
#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
/*
* linux/ringbuffer/frontend_internal.h
*
* (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring Buffer Library Synchronization Header (internal helpers).
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/ringbuffer/frontend_types.h"
#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
/* Buffer offset macros */
/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
return offset & ~(chan->backend.buf_size - 1);
}
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
return (offset + chan->backend.subbuf_size)
& ~(chan->backend.subbuf_size - 1);
}
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
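/*
 * Worked example: with an assumed 64 kB buffer split into 4 sub-buffers of
 * 16 kB (subbuf_size_order = 14, buf_size_order = 16), an offset of 0x2a123
 * decomposes as:
 *   buf_trunc()     = 0x20000  (buffer-sized multiple)
 *   buf_trunc_val() = 2        (buffer number)
 *   buf_offset()    = 0xa123   (offset within the buffer)
 *   subbuf_trunc()  = 0x28000  (sub-buffer-sized multiple)
 *   subbuf_offset() = 0x2123   (offset within the sub-buffer)
 *   subbuf_align()  = 0x2c000  (start of the next sub-buffer)
 *   subbuf_index()  = 2        (0xa123 >> 14)
 */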
/*
* Last TSC comparison functions. Check if the current TSC overflows tsc_bits
* bits from the last TSC read. When overflows are detected, the full 64-bit
* timestamp counter should be written in the record header. Reads and writes
* last_tsc atomically.
*/
#if (BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
/*
* Ensure the compiler performs this update in a single instruction.
*/
v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}
static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf, u64 tsc)
{
unsigned long tsc_shifted;
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
if (unlikely(tsc_shifted
- (unsigned long)v_read(config, &buf->last_tsc)))
return 1;
else
return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
v_set(config, &buf->last_tsc, (unsigned long)tsc);
}
static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
if (unlikely((tsc - v_read(config, &buf->last_tsc))
>> config->tsc_bits))
return 1;
else
return 0;
}
#endif
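/*
 * Worked example (assuming config->tsc_bits = 27): for small deltas between
 * consecutive records, (tsc - last_tsc) >> 27 is 0, so the compact 27-bit
 * timestamp carried in the record header is sufficient. Once the shifted
 * delta (64-bit case) or the shifted timestamp itself (32-bit case) changes,
 * last_tsc_overflow() returns 1 and the client must write the full 64-bit
 * timestamp for that record.
 */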
extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
enum switch_mode mode);
/* Buffer write helpers */
static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset)
{
unsigned long consumed_old, consumed_new;
do {
consumed_old = atomic_long_read(&buf->consumed);
/*
* If buffer is in overwrite mode, push the reader consumed
* count if the write position has reached it and we are not
* at the first iteration (don't push the reader farther than
* the writer). This operation can be done concurrently by many
 * writers in the same buffer; the writer at the farthest write
 * position (sub-buffer index) in the buffer is the one which wins
 * this loop.
*/
if (unlikely(subbuf_trunc(offset, chan)
- subbuf_trunc(consumed_old, chan)
>= chan->backend.buf_size))
consumed_new = subbuf_align(consumed_old, chan);
else
return;
} while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
consumed_new) != consumed_old));
}
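/*
 * Note: with the example geometry above (64 kB buffer, 16 kB sub-buffers),
 * a writer whose offset has moved a whole buffer size ahead of "consumed"
 * bumps the consumed count to the next sub-buffer boundary, so in overwrite
 * mode the reader is never left more than one buffer behind the writer.
 */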
static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx)
{
if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
v_set(config, &buf->commit_hot[idx].seq, commit_count);
}
static inline
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
consumed_old = atomic_long_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
 * get the data if we are racing. The mb() that ensures correct
* memory order is in get_subbuf.
*/
write_offset = v_read(config, &buf->offset);
/*
* Check that the subbuffer we are trying to consume has been
* already fully committed.
*/
if (((commit_count - chan->backend.subbuf_size)
& chan->commit_count_mask)
- (buf_trunc(consumed_old, chan)
>> chan->backend.num_subbuf_order)
!= 0)
return 0;
/*
* Check that we are not about to read the same subbuffer in
* which the writer head is.
*/
if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
== 0)
return 0;
return 1;
}
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
unsigned long idx)
{
return subbuffer_get_data_size(config, &buf->backend, idx);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
* knowing if an execution context is nested (for per-cpu buffers only).
* This is a very specific ftrace use-case, so we keep this as "internal" API.
*/
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan)
{
unsigned long offset, idx, commit_count;
CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
/*
* Read offset and commit count in a loop so they are both read
 * atomically wrt interrupts. We deal with interrupt concurrency by
* restarting both reads if the offset has been pushed. Note that given
* we only have to deal with interrupt concurrency here, an interrupt
* modifying the commit count will also modify "offset", so it is safe
* to only check for offset modifications.
*/
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
commit_count = v_read(config, &buf->commit_hot[idx].cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- (commit_count & chan->commit_count_mask) == 0);
}
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
u64 tsc;
/* Check if all commits have been done */
if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- (old_commit_count & chan->commit_count_mask) == 0)) {
/*
* If we succeeded at updating cc_sb below, we are the subbuffer
 * writer delivering the subbuffer. This deals with concurrent
 * updates of the "cc" value without adding an add_return atomic
 * operation to the fast path.
*
* We are doing the delivery in two steps:
* - First, we cmpxchg() cc_sb to the new value
* old_commit_count + 1. This ensures that we are the only
* subbuffer user successfully filling the subbuffer, but we
* do _not_ set the cc_sb value to "commit_count" yet.
* Therefore, other writers that would wrap around the ring
* buffer and try to start writing to our subbuffer would
* have to drop records, because it would appear as
* non-filled.
* We therefore have exclusive access to the subbuffer control
* structures. This mutual exclusion with other writers is
 * crucially important for counting record overruns locklessly in
 * flight recorder mode.
* - When we are ready to release the subbuffer (either for
* reading or for overrun by other writers), we simply set the
* cc_sb value to "commit_count" and perform delivery.
*
 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
* Start of exclusive subbuffer access. We are
* guaranteed to be the last writer in this subbuffer
* and any other writer trying to access this subbuffer
* in this state is required to drop records.
*/
tsc = config->cb.ring_buffer_clock_read(chan);
v_add(config,
subbuffer_get_records_count(config,
&buf->backend, idx),
&buf->records_count);
v_add(config,
subbuffer_count_records_overrun(config,
&buf->backend,
idx),
&buf->records_overrun);
config->cb.buffer_end(buf, tsc, idx,
lib_ring_buffer_get_data_size(config,
buf,
idx));
/*
* Set noref flag and offset for this subbuffer id.
* Contains a memory barrier that ensures counter stores
* are ordered before set noref and offset.
*/
lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
buf_trunc_val(offset, chan));
/*
* Order set_noref and record counter updates before the
* end of subbuffer exclusive access. Orders with
* respect to writers coming into the subbuffer after
* wrap around, and also order wrt concurrent readers.
*/
smp_mb();
/* End of exclusive subbuffer access */
v_set(config, &buf->commit_cold[idx].cc_sb,
commit_count);
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx);
/*
* RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
*/
if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
&& atomic_long_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
wake_up_interruptible(&buf->read_wait);
wake_up_interruptible(&chan->read_wait);
}
}
}
}
/*
* lib_ring_buffer_write_commit_counter
*
 * For flight recording. Must be called after commit.
* This function increments the subbuffer's commit_seq counter each time the
* commit count reaches back the reserve offset (modulo subbuffer size). It is
* useful for crash dump.
*/
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan,
unsigned long idx,
unsigned long buf_offset,
unsigned long commit_count,
size_t slot_size)
{
unsigned long offset, commit_seq_old;
if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
return;
offset = buf_offset + slot_size;
/*
* subbuf_offset includes commit_count_mask. We can simply
* compare the offsets within the subbuffer without caring about
* buffer full/empty mismatch because offset is never zero here
* (subbuffer header and record headers have non-zero length).
*/
if (unlikely(subbuf_offset(offset - commit_count, chan)))
return;
commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
while ((long) (commit_seq_old - commit_count) < 0)
commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
commit_seq_old, commit_count);
}
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
/* Keep track of trap nesting inside ring buffer code */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */

View file

@ -1,176 +0,0 @@
#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
/*
* linux/ringbuffer/frontend_types.h
*
* (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring Buffer Library Synchronization Header (types).
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/kref.h>
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend_types.h"
#include "../../wrapper/spinlock.h"
#include "../../lib/prio_heap/lttng_prio_heap.h" /* For per-CPU read-side iterator */
/*
* A switch is done during tracing or as a final flush after tracing (so it
* won't write in the new sub-buffer).
*/
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
/* channel-level read-side iterator */
struct channel_iter {
/* Prio heap of buffers. Lowest timestamps at the top. */
struct lttng_ptr_heap heap; /* Heap of struct lib_ring_buffer ptrs */
struct list_head empty_head; /* Empty buffers linked-list head */
int read_open; /* Opened for reading ? */
u64 last_qs; /* Last quiescent state timestamp */
u64 last_timestamp; /* Last timestamp (for WARN_ON) */
int last_cpu; /* Last timestamp cpu */
/*
* read() file operation state.
*/
unsigned long len_left;
};
/* channel: collection of per-cpu ring buffers. */
struct channel {
atomic_t record_disabled;
unsigned long commit_count_mask; /*
* Commit count mask, removing
* the MSBs corresponding to
* bits used to represent the
* subbuffer index.
*/
struct channel_backend backend; /* Associated backend */
unsigned long switch_timer_interval; /* Buffer flush (jiffies) */
unsigned long read_timer_interval; /* Reader wakeup (jiffies) */
struct notifier_block cpu_hp_notifier; /* CPU hotplug notifier */
struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
struct notifier_block hp_iter_notifier; /* hotplug iterator notifier */
uint cpu_hp_enable:1; /* Enable CPU hotplug notif. */
uint hp_iter_enable:1; /* Enable hp iter notif. */
wait_queue_head_t read_wait; /* reader wait queue */
wait_queue_head_t hp_wait; /* CPU hotplug wait queue */
int finalized; /* Has channel been finalized */
struct channel_iter iter; /* Channel read-side iterator */
struct kref ref; /* Reference count */
};
/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
union v_atomic cc; /* Commit counter */
union v_atomic seq; /* Consecutive commits */
};
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
union v_atomic cc_sb; /* Incremented _once_ at sb switch */
};
/* Per-buffer read iterator */
struct lib_ring_buffer_iter {
u64 timestamp; /* Current record timestamp */
size_t header_len; /* Current record header length */
size_t payload_len; /* Current record payload length */
struct list_head empty_node; /* Linked list of empty buffers */
unsigned long consumed, read_offset, data_size;
enum {
ITER_GET_SUBBUF = 0,
ITER_TEST_RECORD,
ITER_NEXT_RECORD,
ITER_PUT_SUBBUF,
} state;
uint allocated:1;
uint read_open:1; /* Opened for reading ? */
};
/* ring buffer state */
struct lib_ring_buffer {
/* First 32 bytes cache-hot cacheline */
union v_atomic offset; /* Current offset in the buffer */
struct commit_counters_hot *commit_hot;
/* Commit count per sub-buffer */
atomic_long_t consumed; /*
* Current offset in the buffer
* standard atomic access (shared)
*/
atomic_t record_disabled;
/* End of first 32 bytes cacheline */
union v_atomic last_tsc; /*
* Last timestamp written in the buffer.
*/
struct lib_ring_buffer_backend backend; /* Associated backend */
struct commit_counters_cold *commit_cold;
/* Commit count per sub-buffer */
atomic_long_t active_readers; /*
* Active readers count
* standard atomic access (shared)
*/
/* Dropped records */
union v_atomic records_lost_full; /* Buffer full */
union v_atomic records_lost_wrap; /* Nested wrap-around */
union v_atomic records_lost_big; /* Events too big */
union v_atomic records_count; /* Number of records written */
union v_atomic records_overrun; /* Number of overwritten records */
wait_queue_head_t read_wait; /* reader buffer-level wait queue */
wait_queue_head_t write_wait; /* writer buffer-level wait queue (for metadata only) */
int finalized; /* buffer has been finalized */
struct timer_list switch_timer; /* timer for periodical switch */
struct timer_list read_timer; /* timer for read poll */
raw_spinlock_t raw_tick_nohz_spinlock; /* nohz entry lock/trylock */
struct lib_ring_buffer_iter iter; /* read-side iterator */
unsigned long get_subbuf_consumed; /* Read-side consumed */
unsigned long prod_snapshot; /* Producer count snapshot */
unsigned long cons_snapshot; /* Consumer count snapshot */
uint get_subbuf:1; /* Sub-buffer being held by reader */
uint switch_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
uint read_timer_enabled:1; /* Protected by ring_buffer_nohz_lock */
};
static inline
void *channel_get_private(struct channel *chan)
{
return chan->backend.priv;
}
/*
* Issue warnings and disable channels upon internal error.
* Can receive struct lib_ring_buffer or struct lib_ring_buffer_backend
* parameters.
*/
#define CHAN_WARN_ON(c, cond) \
({ \
struct channel *__chan; \
int _____ret = unlikely(cond); \
if (_____ret) { \
if (__same_type(*(c), struct channel_backend)) \
__chan = container_of((void *) (c), \
struct channel, \
backend); \
else if (__same_type(*(c), struct channel)) \
__chan = (void *) (c); \
else \
BUG_ON(1); \
atomic_inc(&__chan->record_disabled); \
WARN_ON(1); \
} \
_____ret; \
})
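/*
 * Usage note: CHAN_WARN_ON() evaluates to the truth value of "cond", so it
 * can be used inline, e.g.:
 *
 *	if (CHAN_WARN_ON(chan, offset >= chan->backend.buf_size))
 *		return;
 *
 * A triggered condition disables further recording on the channel instead of
 * crashing, which keeps instrumentation failures non-fatal.
 */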
#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */

View file

@ -1,70 +0,0 @@
#ifndef _LINUX_RING_BUFFER_ITERATOR_H
#define _LINUX_RING_BUFFER_ITERATOR_H
/*
* linux/ringbuffer/iterator.h
*
* (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring buffer and channel iterators.
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
/*
* lib_ring_buffer_get_next_record advances the buffer read position to the next
* record. It returns either the size of the next record, -EAGAIN if there is
* currently no data available, or -ENODATA if no data is available and buffer
* is finalized.
*/
extern ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
struct lib_ring_buffer *buf);
/*
* channel_get_next_record advances the buffer read position to the next record.
* It returns either the size of the next record, -EAGAIN if there is currently
* no data available, or -ENODATA if no data is available and buffer is
* finalized.
* Returns the current buffer in ret_buf.
*/
extern ssize_t channel_get_next_record(struct channel *chan,
struct lib_ring_buffer **ret_buf);
/**
* read_current_record - copy the buffer current record into dest.
* @buf: ring buffer
* @dest: destination where the record should be copied
*
* dest should be large enough to contain the record. Returns the number of
* bytes copied.
*/
static inline size_t read_current_record(struct lib_ring_buffer *buf, void *dest)
{
return lib_ring_buffer_read(&buf->backend, buf->iter.read_offset,
dest, buf->iter.payload_len);
}
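/*
 * Illustrative sketch: a kernel-side consumer draining a single buffer with
 * the iterator API could look roughly like this. Error handling is elided
 * and "scratch" is a hypothetical caller-provided area large enough for the
 * largest record. Not compiled in.
 */
#if 0
static void example_drain_buffer(struct channel *chan,
				 struct lib_ring_buffer *buf, void *scratch)
{
	ssize_t len;

	if (lib_ring_buffer_iterator_open(buf))
		return;
	for (;;) {
		len = lib_ring_buffer_get_next_record(chan, buf);
		if (len == -ENODATA)
			break;			/* finalized, nothing left */
		if (len == -EAGAIN)
			continue;		/* empty for now; real code would wait */
		read_current_record(buf, scratch);
		/* ... consume "len" bytes of payload from scratch ... */
	}
	lib_ring_buffer_iterator_release(buf);
}
#endif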
extern int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf);
extern void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf);
extern int channel_iterator_open(struct channel *chan);
extern void channel_iterator_release(struct channel *chan);
extern const struct file_operations channel_payload_file_operations;
extern const struct file_operations lib_ring_buffer_payload_file_operations;
/*
* Used internally.
*/
int channel_iterator_init(struct channel *chan);
void channel_iterator_unregister_notifiers(struct channel *chan);
void channel_iterator_free(struct channel *chan);
void channel_iterator_reset(struct channel *chan);
void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf);
#endif /* _LINUX_RING_BUFFER_ITERATOR_H */

View file

@ -1,30 +0,0 @@
#ifndef _LINUX_RING_BUFFER_NOHZ_H
#define _LINUX_RING_BUFFER_NOHZ_H
/*
* ringbuffer/nohz.h
*
* Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#ifdef CONFIG_LIB_RING_BUFFER
void lib_ring_buffer_tick_nohz_flush(void);
void lib_ring_buffer_tick_nohz_stop(void);
void lib_ring_buffer_tick_nohz_restart(void);
#else
static inline void lib_ring_buffer_tick_nohz_flush(void)
{
}
static inline void lib_ring_buffer_tick_nohz_stop(void)
{
}
static inline void lib_ring_buffer_tick_nohz_restart(void)
{
}
#endif
#endif /* _LINUX_RING_BUFFER_NOHZ_H */

View file

@ -1,854 +0,0 @@
/*
* ring_buffer_backend.c
*
* Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "../../wrapper/ringbuffer/config.h"
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
/**
* lib_ring_buffer_backend_allocate - allocate a channel buffer
* @config: ring buffer instance configuration
* @buf: the buffer struct
* @size: total size of the buffer
* @num_subbuf: number of subbuffers
* @extra_reader_sb: need extra subbuffer for reader
*/
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
size_t size, size_t num_subbuf,
int extra_reader_sb)
{
struct channel_backend *chanb = &bufb->chan->backend;
unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
unsigned long subbuf_size, mmap_offset = 0;
unsigned long num_subbuf_alloc;
struct page **pages;
void **virt;
unsigned long i;
num_pages = size >> PAGE_SHIFT;
num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
subbuf_size = chanb->subbuf_size;
num_subbuf_alloc = num_subbuf;
if (extra_reader_sb) {
num_pages += num_pages_per_subbuf; /* Add pages for reader */
num_subbuf_alloc++;
}
pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!pages))
goto pages_error;
virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!virt))
goto virt_error;
bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
* num_subbuf_alloc,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!bufb->array))
goto array_error;
for (i = 0; i < num_pages; i++) {
pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
GFP_KERNEL | __GFP_ZERO, 0);
if (unlikely(!pages[i]))
goto depopulate;
virt[i] = page_address(pages[i]);
}
bufb->num_pages_per_subbuf = num_pages_per_subbuf;
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
bufb->array[i] =
kzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_pages) +
sizeof(struct lib_ring_buffer_backend_page)
* num_pages_per_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
if (!bufb->array[i])
goto free_array;
}
/* Allocate write-side subbuffer table */
bufb->buf_wsb = kzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_subbuffer)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!bufb->buf_wsb))
goto free_array;
for (i = 0; i < num_subbuf; i++)
bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
/* Assign read-side subbuffer table */
if (extra_reader_sb)
bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
num_subbuf_alloc - 1);
else
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
/* Assign pages to page index */
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < num_pages_per_subbuf; j++) {
CHAN_WARN_ON(chanb, page_idx > num_pages);
bufb->array[i]->p[j].virt = virt[page_idx];
bufb->array[i]->p[j].page = pages[page_idx];
page_idx++;
}
if (config->output == RING_BUFFER_MMAP) {
bufb->array[i]->mmap_offset = mmap_offset;
mmap_offset += subbuf_size;
}
}
/*
* If kmalloc ever uses vmalloc underneath, make sure the buffer pages
* will not fault.
*/
wrapper_vmalloc_sync_all();
kfree(virt);
kfree(pages);
return 0;
free_array:
for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
kfree(bufb->array[i]);
depopulate:
/* Free all allocated pages */
for (i = 0; (i < num_pages && pages[i]); i++)
__free_page(pages[i]);
kfree(bufb->array);
array_error:
kfree(virt);
virt_error:
kfree(pages);
pages_error:
return -ENOMEM;
}
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu)
{
const struct lib_ring_buffer_config *config = chanb->config;
bufb->chan = container_of(chanb, struct channel, backend);
bufb->cpu = cpu;
return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
chanb->num_subbuf,
chanb->extra_reader_sb);
}
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
unsigned long i, j, num_subbuf_alloc;
num_subbuf_alloc = chanb->num_subbuf;
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
kfree(bufb->buf_wsb);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
__free_page(bufb->array[i]->p[j].page);
kfree(bufb->array[i]);
}
kfree(bufb->array);
bufb->allocated = 0;
}
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
unsigned long num_subbuf_alloc;
unsigned int i;
num_subbuf_alloc = chanb->num_subbuf;
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
for (i = 0; i < chanb->num_subbuf; i++)
bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
if (chanb->extra_reader_sb)
bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
num_subbuf_alloc - 1);
else
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
for (i = 0; i < num_subbuf_alloc; i++) {
/* Don't reset mmap_offset */
v_set(config, &bufb->array[i]->records_commit, 0);
v_set(config, &bufb->array[i]->records_unread, 0);
bufb->array[i]->data_size = 0;
/* Don't reset backend page and virt addresses */
}
/* Don't reset num_pages_per_subbuf, cpu, allocated */
v_set(config, &bufb->records_read, 0);
}
/*
* The frontend is responsible for also calling ring_buffer_backend_reset for
* each buffer when calling channel_backend_reset.
*/
void channel_backend_reset(struct channel_backend *chanb)
{
struct channel *chan = container_of(chanb, struct channel, backend);
const struct lib_ring_buffer_config *config = chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
* num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
* priv, notifiers, config, cpumask and name.
*/
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
#ifdef CONFIG_HOTPLUG_CPU
/**
* lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
* @nb: notifier block
* @action: hotplug action to take
* @hcpu: CPU number
*
* Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
static
int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct channel_backend *chanb = container_of(nb, struct channel_backend,
cpu_hp_notifier);
const struct lib_ring_buffer_config *config = chanb->config;
struct lib_ring_buffer *buf;
int ret;
CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
buf = per_cpu_ptr(chanb->buf, cpu);
ret = lib_ring_buffer_create(buf, chanb, cpu);
if (ret) {
printk(KERN_ERR
"ring_buffer_cpu_hp_callback: cpu %d "
"buffer creation failed\n", cpu);
return NOTIFY_BAD;
}
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
/* No need to do a buffer switch here, because it will happen
* when tracing is stopped, or will be done by switch timer CPU
* DEAD callback. */
break;
}
return NOTIFY_OK;
}
#endif
/**
* channel_backend_init - initialize a channel backend
* @chanb: channel backend
* @name: channel name
* @config: client ring buffer configuration
* @priv: client private data
* @parent: dentry of parent directory, %NULL for root directory
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
* @num_subbuf: number of sub-buffers (power of 2)
*
 * Returns 0 on success, a negative error value otherwise.
*
* Creates per-cpu channel buffers using the sizes and attributes
* specified. The created channel buffer files will be named
* name_0...name_N-1. File permissions will be %S_IRUSR.
*
* Called with CPU hotplug disabled.
*/
int channel_backend_init(struct channel_backend *chanb,
const char *name,
const struct lib_ring_buffer_config *config,
void *priv, size_t subbuf_size, size_t num_subbuf)
{
struct channel *chan = container_of(chanb, struct channel, backend);
unsigned int i;
int ret;
if (!name)
return -EPERM;
if (!(subbuf_size && num_subbuf))
return -EPERM;
	/* Check that the subbuffer size is at least a page. */
if (subbuf_size < PAGE_SIZE)
return -EINVAL;
/*
* Make sure the number of subbuffers and subbuffer size are power of 2.
*/
CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
ret = subbuffer_id_check_index(config, num_subbuf);
if (ret)
return ret;
chanb->priv = priv;
chanb->buf_size = num_subbuf * subbuf_size;
chanb->subbuf_size = subbuf_size;
chanb->buf_size_order = get_count_order(chanb->buf_size);
chanb->subbuf_size_order = get_count_order(subbuf_size);
chanb->num_subbuf_order = get_count_order(num_subbuf);
chanb->extra_reader_sb =
(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
chanb->num_subbuf = num_subbuf;
strlcpy(chanb->name, name, NAME_MAX);
chanb->config = config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
return -ENOMEM;
}
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
/* Allocating the buffer per-cpu structures */
chanb->buf = alloc_percpu(struct lib_ring_buffer);
if (!chanb->buf)
goto free_cpumask;
/*
 * In the non-hotplug-CPU case, if the ring buffer is allocated in
 * an early initcall, it will not be notified of secondary CPUs.
 * In that case, we need to allocate for all possible CPUs.
*/
#ifdef CONFIG_HOTPLUG_CPU
/*
* buf->backend.allocated test takes care of concurrent CPU
* hotplug.
* Priority higher than frontend, so we create the ring buffer
* before we start the timer.
*/
chanb->cpu_hp_notifier.notifier_call =
lib_ring_buffer_cpu_hp_callback;
chanb->cpu_hp_notifier.priority = 5;
register_hotcpu_notifier(&chanb->cpu_hp_notifier);
get_online_cpus();
for_each_online_cpu(i) {
ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
chanb, i);
if (ret)
goto free_bufs; /* cpu hotplug locked */
}
put_online_cpus();
#else
for_each_possible_cpu(i) {
ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
chanb, i);
if (ret)
goto free_bufs; /* cpu hotplug locked */
}
#endif
} else {
chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
if (!chanb->buf)
goto free_cpumask;
ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
if (ret)
goto free_bufs;
}
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
return 0;
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(i) {
struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
continue;
lib_ring_buffer_free(buf);
}
#ifdef CONFIG_HOTPLUG_CPU
put_online_cpus();
#endif
free_percpu(chanb->buf);
} else
kfree(chanb->buf);
free_cpumask:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
free_cpumask_var(chanb->cpumask);
return -ENOMEM;
}
/**
* channel_backend_unregister_notifiers - unregister notifiers
* @chan: the channel
*
* Holds CPU hotplug.
*/
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
const struct lib_ring_buffer_config *config = chanb->config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
}
/**
* channel_backend_free - destroy the channel
* @chan: the channel
*
* Destroy all channel buffers and frees the channel.
*/
void channel_backend_free(struct channel_backend *chanb)
{
const struct lib_ring_buffer_config *config = chanb->config;
unsigned int i;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(i) {
struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
continue;
lib_ring_buffer_free(buf);
}
free_cpumask_var(chanb->cpumask);
free_percpu(chanb->buf);
} else {
struct lib_ring_buffer *buf = chanb->buf;
CHAN_WARN_ON(chanb, !buf->backend.allocated);
lib_ring_buffer_free(buf);
kfree(buf);
}
}
/**
* lib_ring_buffer_write - write data to a ring_buffer buffer.
* @bufb : buffer backend
* @offset : offset within the buffer
* @src : source address
* @len : length to write
* @pagecpy : page size copied so far
*/
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
const void *src, size_t len, ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
do {
len -= pagecpy;
src += pagecpy;
offset += pagecpy;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
/*
* Underlying layer should never ask for writes across
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
lib_ring_buffer_do_copy(config,
rpages->p[index].virt
+ (offset & ~PAGE_MASK),
src, pagecpy);
} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
/**
* lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
* @bufb : buffer backend
* @offset : offset within the buffer
* @c : the byte to write
* @len : length to write
* @pagecpy : page size copied so far
*/
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
size_t offset,
int c, size_t len, ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
do {
len -= pagecpy;
offset += pagecpy;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
/*
* Underlying layer should never ask for writes across
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
lib_ring_buffer_do_memset(rpages->p[index].virt
+ (offset & ~PAGE_MASK),
c, pagecpy);
} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
/**
* lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
* @bufb : buffer backend
* @offset : offset within the buffer
* @src : source address
* @len : length to write
* @pagecpy : page size copied so far
*
* This function deals with userspace pointers, it should never be called
* directly without having the src pointer checked with access_ok()
* previously.
*/
void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
size_t offset,
const void __user *src, size_t len,
ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
int ret;
do {
len -= pagecpy;
src += pagecpy;
offset += pagecpy;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
/*
* Underlying layer should never ask for writes across
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
		/* ret is the number of bytes that could not be copied. */
		ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy);
if (ret > 0) {
offset += (pagecpy - ret);
len -= (pagecpy - ret);
_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
break; /* stop copy */
}
} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
* @bufb : buffer backend
* @offset : offset within the buffer
* @dest : destination address
* @len : length to copy to destination
*
* Should be protected by get_subbuf/put_subbuf.
* Returns the length copied.
*/
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
size_t index;
ssize_t pagecpy, orig_len;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
orig_len = len;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
if (unlikely(!len))
return 0;
for (;;) {
pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
pagecpy);
len -= pagecpy;
if (likely(!len))
break;
dest += pagecpy;
offset += pagecpy;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
/*
* Underlying layer should never ask for reads across
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
}
return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
/**
* __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
* @bufb : buffer backend
* @offset : offset within the buffer
* @dest : destination userspace address
* @len : length to copy to destination
*
* Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling this
* function.
* Returns -EFAULT on error, 0 if ok.
*/
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
size_t offset, void __user *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
size_t index;
ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
if (unlikely(!len))
return 0;
for (;;) {
pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
if (__copy_to_user(dest,
rpages->p[index].virt + (offset & ~PAGE_MASK),
pagecpy))
return -EFAULT;
len -= pagecpy;
if (likely(!len))
break;
dest += pagecpy;
offset += pagecpy;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
/*
* Underlying layer should never ask for reads across
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
}
return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
/**
* lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
* @bufb : buffer backend
* @offset : offset within the buffer
* @dest : destination address
* @len : destination's length
*
 * Returns the string's length.
* Should be protected by get_subbuf/put_subbuf.
*/
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
size_t index;
ssize_t pagecpy, pagelen, strpagelen, orig_offset;
char *str;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
orig_offset = offset;
for (;;) {
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
strpagelen = strnlen(str, pagelen);
if (len) {
pagecpy = min_t(size_t, len, strpagelen);
if (dest) {
memcpy(dest, str, pagecpy);
dest += pagecpy;
}
len -= pagecpy;
}
offset += strpagelen;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
if (strpagelen < pagelen)
break;
/*
* Underlying layer should never ask for reads across
* subbuffers.
*/
CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
}
if (dest && len)
((char *)dest)[0] = 0;
return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
/**
* lib_ring_buffer_read_get_page - Get a whole page to read from
* @bufb : buffer backend
* @offset : offset within the buffer
* @virt : pointer to page address (output)
*
* Should be protected by get_subbuf/put_subbuf.
* Returns the pointer to the page struct pointer.
*/
struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
size_t offset, void ***virt)
{
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
*virt = &rpages->p[index].virt;
return &rpages->p[index].page;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
/**
* lib_ring_buffer_read_offset_address - get address of a buffer location
* @bufb : buffer backend
* @offset : offset within the buffer.
*
* Return the address where a given offset is located (for read).
* Should be used to get the current subbuffer header pointer. Given we know
* it's never on a page boundary, it's safe to write directly to this address,
* as long as the write is never bigger than a page size.
*/
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
size_t offset)
{
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
/**
* lib_ring_buffer_offset_address - get address of a location within the buffer
* @bufb : buffer backend
* @offset : offset within the buffer.
*
* Return the address where a given offset is located.
* Should be used to get the current subbuffer header pointer. Given we know
* it's always at the beginning of a page, it's safe to write directly to this
* address, as long as the write is never bigger than a page size.
*/
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
size_t offset)
{
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
const struct lib_ring_buffer_config *config = chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
sbidx = offset >> chanb->subbuf_size_order;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
id = bufb->buf_wsb[sbidx].id;
sb_bindex = subbuffer_id_get_index(config, id);
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);

File diff suppressed because it is too large

View file

@ -1,798 +0,0 @@
/*
* ring_buffer_iterator.c
*
* (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring buffer and channel iterators. Get each event of a channel in order. Uses
 * a prio heap for per-cpu buffers, giving an O(log(NR_CPUS)) algorithmic
* complexity for the "get next event" operation.
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/iterator.h"
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
/*
* Safety factor taking into account internal kernel interrupt latency.
 * Assuming 250ms worst-case latency.
*/
#define MAX_SYSTEM_LATENCY 250
/*
* Maximum delta expected between trace clocks. At most 1 jiffy delta.
*/
#define MAX_CLOCK_DELTA (jiffies_to_usecs(1) * 1000)
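/*
 * Note: with an assumed HZ of 250, jiffies_to_usecs(1) is 4000, so
 * MAX_CLOCK_DELTA evaluates to 4,000,000 ns (4 ms) of tolerated clock delta
 * between per-cpu trace clocks.
 */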
/**
* lib_ring_buffer_get_next_record - Get the next record in a buffer.
* @chan: channel
* @buf: buffer
*
* Returns the size of the event read, -EAGAIN if buffer is empty, -ENODATA if
* buffer is empty and finalized. The buffer must already be opened for reading.
*/
ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
struct lib_ring_buffer *buf)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer_iter *iter = &buf->iter;
int ret;
restart:
switch (iter->state) {
case ITER_GET_SUBBUF:
ret = lib_ring_buffer_get_next_subbuf(buf);
if (ret && !ACCESS_ONCE(buf->finalized)
&& config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
/*
* Use "pull" scheme for global buffers. The reader
* itself flushes the buffer to "pull" data not visible
* to readers yet. Flush current subbuffer and re-try.
*
* Per-CPU buffers rather use a "push" scheme because
* the IPI needed to flush all CPU's buffers is too
* costly. In the "push" scheme, the reader waits for
* the writer periodic deferrable timer to flush the
* buffers (keeping track of a quiescent state
* timestamp). Therefore, the writer "pushes" data out
* of the buffers rather than letting the reader "pull"
* data from the buffer.
*/
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
ret = lib_ring_buffer_get_next_subbuf(buf);
}
if (ret)
return ret;
iter->consumed = buf->cons_snapshot;
iter->data_size = lib_ring_buffer_get_read_data_size(config, buf);
iter->read_offset = iter->consumed;
/* skip header */
iter->read_offset += config->cb.subbuffer_header_size();
iter->state = ITER_TEST_RECORD;
goto restart;
case ITER_TEST_RECORD:
if (iter->read_offset - iter->consumed >= iter->data_size) {
iter->state = ITER_PUT_SUBBUF;
} else {
CHAN_WARN_ON(chan, !config->cb.record_get);
config->cb.record_get(config, chan, buf,
iter->read_offset,
&iter->header_len,
&iter->payload_len,
&iter->timestamp);
iter->read_offset += iter->header_len;
subbuffer_consume_record(config, &buf->backend);
iter->state = ITER_NEXT_RECORD;
return iter->payload_len;
}
goto restart;
case ITER_NEXT_RECORD:
iter->read_offset += iter->payload_len;
iter->state = ITER_TEST_RECORD;
goto restart;
case ITER_PUT_SUBBUF:
lib_ring_buffer_put_next_subbuf(buf);
iter->state = ITER_GET_SUBBUF;
goto restart;
default:
CHAN_WARN_ON(chan, 1); /* Should not happen */
return -EPERM;
}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_get_next_record);
static int buf_is_higher(void *a, void *b)
{
struct lib_ring_buffer *bufa = a;
struct lib_ring_buffer *bufb = b;
/* Consider lowest timestamps to be at the top of the heap */
return (bufa->iter.timestamp < bufb->iter.timestamp);
}
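/*
 * Note: the comparison is deliberately inverted ("higher" means an older
 * timestamp), so lttng_heap_maximum() always yields the buffer holding the
 * oldest pending record, which is exactly what the fusion merge below
 * consumes first.
 */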
static
void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
struct channel *chan)
{
struct lttng_ptr_heap *heap = &chan->iter.heap;
struct lib_ring_buffer *buf, *tmp;
ssize_t len;
list_for_each_entry_safe(buf, tmp, &chan->iter.empty_head,
iter.empty_node) {
len = lib_ring_buffer_get_next_record(chan, buf);
/*
* Deal with -EAGAIN and -ENODATA.
* len >= 0 means record contains data.
* -EBUSY should never happen, because we support only one
* reader.
*/
switch (len) {
case -EAGAIN:
/* Keep node in empty list */
break;
case -ENODATA:
/*
* Buffer is finalized. Don't add to list of empty
* buffer, because it has no more data to provide, ever.
*/
list_del(&buf->iter.empty_node);
break;
case -EBUSY:
CHAN_WARN_ON(chan, 1);
break;
default:
/*
* Insert buffer into the heap, remove from empty buffer
* list.
*/
CHAN_WARN_ON(chan, len < 0);
list_del(&buf->iter.empty_node);
CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
}
}
}
static
void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
struct channel *chan)
{
u64 timestamp_qs;
unsigned long wait_msecs;
/*
* No need to wait if no empty buffers are present.
*/
if (list_empty(&chan->iter.empty_head))
return;
timestamp_qs = config->cb.ring_buffer_clock_read(chan);
/*
* We need to consider previously empty buffers.
* Do a get next buf record on each of them. Add them to
* the heap if they have data. If at least one of them
 * doesn't have data, we need to wait for
* switch_timer_interval + MAX_SYSTEM_LATENCY (so we are sure the
* buffers have been switched either by the timer or idle entry) and
* check them again, adding them if they have data.
*/
lib_ring_buffer_get_empty_buf_records(config, chan);
/*
* No need to wait if no empty buffers are present.
*/
if (list_empty(&chan->iter.empty_head))
return;
/*
* We need to wait for the buffer switch timer to run. If the
* CPU is idle, idle entry performed the switch.
* TODO: we could optimize further by skipping the sleep if all
* empty buffers belong to idle or offline cpus.
*/
wait_msecs = jiffies_to_msecs(chan->switch_timer_interval);
wait_msecs += MAX_SYSTEM_LATENCY;
msleep(wait_msecs);
lib_ring_buffer_get_empty_buf_records(config, chan);
/*
* Any buffer still in the empty list here cannot possibly
* contain an event with a timestamp prior to "timestamp_qs".
* The new quiescent state timestamp is the one we grabbed
* before waiting for buffer data. It is therefore safe to
* ignore empty buffers up to last_qs timestamp for fusion
* merge.
*/
chan->iter.last_qs = timestamp_qs;
}
/**
* channel_get_next_record - Get the next record in a channel.
* @chan: channel
* @ret_buf: the buffer in which the event is located (output)
*
* Returns the size of new current event, -EAGAIN if all buffers are empty,
* -ENODATA if all buffers are empty and finalized. The channel must already be
* opened for reading.
*/
ssize_t channel_get_next_record(struct channel *chan,
struct lib_ring_buffer **ret_buf)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer *buf;
struct lttng_ptr_heap *heap;
ssize_t len;
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
*ret_buf = channel_get_ring_buffer(config, chan, 0);
return lib_ring_buffer_get_next_record(chan, *ret_buf);
}
heap = &chan->iter.heap;
/*
* get next record for topmost buffer.
*/
buf = lttng_heap_maximum(heap);
if (buf) {
len = lib_ring_buffer_get_next_record(chan, buf);
/*
* Deal with -EAGAIN and -ENODATA.
* len >= 0 means record contains data.
*/
switch (len) {
case -EAGAIN:
buf->iter.timestamp = 0;
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
/* Remove topmost buffer from the heap */
CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
break;
case -ENODATA:
/*
* Buffer is finalized. Remove buffer from heap and
* don't add to list of empty buffer, because it has no
* more data to provide, ever.
*/
CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
break;
case -EBUSY:
CHAN_WARN_ON(chan, 1);
break;
default:
/*
* Reinsert buffer into the heap. Note that heap can be
* partially empty, so we need to use
* lttng_heap_replace_max().
*/
CHAN_WARN_ON(chan, len < 0);
CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
break;
}
}
buf = lttng_heap_maximum(heap);
if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
/*
* Deal with buffers previously showing no data.
* Add buffers containing data to the heap, update
* last_qs.
*/
lib_ring_buffer_wait_for_qs(config, chan);
}
*ret_buf = buf = lttng_heap_maximum(heap);
if (buf) {
/*
* If this warning triggers, you probably need to check your
* system interrupt latency. Typical causes: too many printk()
* output going to a serial console with interrupts off.
* Allow for MAX_CLOCK_DELTA ns timestamp delta going backward.
* Observed on SMP KVM setups with trace_clock().
*/
if (chan->iter.last_timestamp
> (buf->iter.timestamp + MAX_CLOCK_DELTA)) {
printk(KERN_WARNING "ring_buffer: timestamps going "
"backward. Last time %llu ns, cpu %d, "
"current time %llu ns, cpu %d, "
"delta %llu ns.\n",
chan->iter.last_timestamp, chan->iter.last_cpu,
buf->iter.timestamp, buf->backend.cpu,
chan->iter.last_timestamp - buf->iter.timestamp);
CHAN_WARN_ON(chan, 1);
}
chan->iter.last_timestamp = buf->iter.timestamp;
chan->iter.last_cpu = buf->backend.cpu;
return buf->iter.payload_len;
} else {
/* Heap is empty */
if (list_empty(&chan->iter.empty_head))
return -ENODATA; /* All buffers finalized */
else
return -EAGAIN; /* Temporarily empty */
}
}
EXPORT_SYMBOL_GPL(channel_get_next_record);
static
void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer *buf)
{
if (buf->iter.allocated)
return;
buf->iter.allocated = 1;
if (chan->iter.read_open && !buf->iter.read_open) {
CHAN_WARN_ON(chan, lib_ring_buffer_open_read(buf) != 0);
buf->iter.read_open = 1;
}
/* Add to list of buffers without any current record */
if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
}
#ifdef CONFIG_HOTPLUG_CPU
static
int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct channel *chan = container_of(nb, struct channel,
hp_iter_notifier);
struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
const struct lib_ring_buffer_config *config = chan->backend.config;
if (!chan->hp_iter_enable)
return NOTIFY_DONE;
CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
switch (action) {
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
lib_ring_buffer_iterator_init(chan, buf);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
#endif
int channel_iterator_init(struct channel *chan)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer *buf;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
int cpu, ret;
INIT_LIST_HEAD(&chan->iter.empty_head);
ret = lttng_heap_init(&chan->iter.heap,
num_possible_cpus(),
GFP_KERNEL, buf_is_higher);
if (ret)
return ret;
/*
 * In the non-hotplug-CPU case, if the ring buffer is allocated in
 * an early initcall, it will not be notified of secondary CPUs.
 * In that case, we need to allocate for all possible CPUs.
*/
#ifdef CONFIG_HOTPLUG_CPU
chan->hp_iter_notifier.notifier_call =
channel_iterator_cpu_hotplug;
chan->hp_iter_notifier.priority = 10;
register_cpu_notifier(&chan->hp_iter_notifier);
get_online_cpus();
for_each_online_cpu(cpu) {
buf = per_cpu_ptr(chan->backend.buf, cpu);
lib_ring_buffer_iterator_init(chan, buf);
}
chan->hp_iter_enable = 1;
put_online_cpus();
#else
for_each_possible_cpu(cpu) {
buf = per_cpu_ptr(chan->backend.buf, cpu);
lib_ring_buffer_iterator_init(chan, buf);
}
#endif
} else {
buf = channel_get_ring_buffer(config, chan, 0);
lib_ring_buffer_iterator_init(chan, buf);
}
return 0;
}
void channel_iterator_unregister_notifiers(struct channel *chan)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
chan->hp_iter_enable = 0;
unregister_cpu_notifier(&chan->hp_iter_notifier);
}
}
void channel_iterator_free(struct channel *chan)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
lttng_heap_free(&chan->iter.heap);
}
int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
return lib_ring_buffer_open_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_open);
/*
* Note: Iterators must not be mixed with other types of outputs, because an
* iterator can leave the buffer in "GET" state, which is not consistent with
* other types of output (mmap, splice, raw data read).
*/
void lib_ring_buffer_iterator_release(struct lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
int channel_iterator_open(struct channel *chan)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer *buf;
int ret = 0, cpu;
CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
get_online_cpus();
/* Allow CPU hotplug to keep track of opened reader */
chan->iter.read_open = 1;
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(config, chan, cpu);
ret = lib_ring_buffer_iterator_open(buf);
if (ret)
goto error;
buf->iter.read_open = 1;
}
put_online_cpus();
} else {
buf = channel_get_ring_buffer(config, chan, 0);
ret = lib_ring_buffer_iterator_open(buf);
}
return ret;
error:
/* Error should always happen on CPU 0, hence no close is required. */
CHAN_WARN_ON(chan, cpu != 0);
put_online_cpus();
return ret;
}
EXPORT_SYMBOL_GPL(channel_iterator_open);
void channel_iterator_release(struct channel *chan)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer *buf;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
get_online_cpus();
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(config, chan, cpu);
if (buf->iter.read_open) {
lib_ring_buffer_iterator_release(buf);
buf->iter.read_open = 0;
}
}
chan->iter.read_open = 0;
put_online_cpus();
} else {
buf = channel_get_ring_buffer(config, chan, 0);
lib_ring_buffer_iterator_release(buf);
}
}
EXPORT_SYMBOL_GPL(channel_iterator_release);
void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
{
struct channel *chan = buf->backend.chan;
if (buf->iter.state != ITER_GET_SUBBUF)
lib_ring_buffer_put_next_subbuf(buf);
buf->iter.state = ITER_GET_SUBBUF;
/* Remove from heap (if present). */
if (lttng_heap_cherrypick(&chan->iter.heap, buf))
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
buf->iter.timestamp = 0;
buf->iter.header_len = 0;
buf->iter.payload_len = 0;
buf->iter.consumed = 0;
buf->iter.read_offset = 0;
buf->iter.data_size = 0;
/* Don't reset allocated and read_open */
}
void channel_iterator_reset(struct channel *chan)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer *buf;
int cpu;
/* Empty heap, put into empty_head */
while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
list_add(&buf->iter.empty_node, &chan->iter.empty_head);
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(config, chan, cpu);
lib_ring_buffer_iterator_reset(buf);
}
/* Don't reset read_open */
chan->iter.last_qs = 0;
chan->iter.last_timestamp = 0;
chan->iter.last_cpu = 0;
chan->iter.len_left = 0;
}
/*
* Ring buffer payload extraction read() implementation.
*/
static
ssize_t channel_ring_buffer_file_read(struct file *filp,
char __user *user_buf,
size_t count,
loff_t *ppos,
struct channel *chan,
struct lib_ring_buffer *buf,
int fusionmerge)
{
const struct lib_ring_buffer_config *config = chan->backend.config;
size_t read_count = 0, read_offset;
ssize_t len;
might_sleep();
if (!access_ok(VERIFY_WRITE, user_buf, count))
return -EFAULT;
/* Finish copy of previous record */
if (*ppos != 0) {
if (read_count < count) {
len = chan->iter.len_left;
read_offset = *ppos;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
&& fusionmerge)
buf = lttng_heap_maximum(&chan->iter.heap);
CHAN_WARN_ON(chan, !buf);
goto skip_get_next;
}
}
while (read_count < count) {
size_t copy_len, space_left;
if (fusionmerge)
len = channel_get_next_record(chan, &buf);
else
len = lib_ring_buffer_get_next_record(chan, buf);
len_test:
if (len < 0) {
/*
* Check if buffer is finalized (end of file).
*/
if (len == -ENODATA) {
/* A 0 read_count will tell about end of file */
goto nodata;
}
if (filp->f_flags & O_NONBLOCK) {
if (!read_count)
read_count = -EAGAIN;
goto nodata;
} else {
int error;
/*
* No data available at the moment, return what
* we got.
*/
if (read_count)
goto nodata;
/*
* Wait for returned len to be >= 0 or -ENODATA.
*/
if (fusionmerge)
error = wait_event_interruptible(
chan->read_wait,
((len = channel_get_next_record(chan,
&buf)), len != -EAGAIN));
else
error = wait_event_interruptible(
buf->read_wait,
((len = lib_ring_buffer_get_next_record(
chan, buf)), len != -EAGAIN));
CHAN_WARN_ON(chan, len == -EBUSY);
if (error) {
read_count = error;
goto nodata;
}
CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
goto len_test;
}
}
read_offset = buf->iter.read_offset;
skip_get_next:
space_left = count - read_count;
if (len <= space_left) {
copy_len = len;
chan->iter.len_left = 0;
*ppos = 0;
} else {
copy_len = space_left;
chan->iter.len_left = len - copy_len;
*ppos = read_offset + copy_len;
}
if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
&user_buf[read_count],
copy_len)) {
/*
* Leave the len_left and ppos values at their current
* state, as we currently have a valid event to read.
*/
return -EFAULT;
}
read_count += copy_len;
	}
return read_count;
nodata:
*ppos = 0;
chan->iter.len_left = 0;
return read_count;
}
/**
* lib_ring_buffer_file_read - Read buffer record payload.
* @filp: file structure pointer.
 * @user_buf: user buffer to read data into.
* @count: number of bytes to read.
* @ppos: file read position.
*
* Returns a negative value on error, or the number of bytes read on success.
* ppos is used to save the position _within the current record_ between calls
* to read().
*/
static
ssize_t lib_ring_buffer_file_read(struct file *filp,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
struct lib_ring_buffer *buf = inode->i_private;
struct channel *chan = buf->backend.chan;
return channel_ring_buffer_file_read(filp, user_buf, count, ppos,
chan, buf, 0);
}
/**
* channel_file_read - Read channel record payload.
* @filp: file structure pointer.
 * @user_buf: user buffer to read data into.
* @count: number of bytes to read.
* @ppos: file read position.
*
* Returns a negative value on error, or the number of bytes read on success.
* ppos is used to save the position _within the current record_ between calls
* to read().
*/
static
ssize_t channel_file_read(struct file *filp,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct inode *inode = filp->f_dentry->d_inode;
struct channel *chan = inode->i_private;
const struct lib_ring_buffer_config *config = chan->backend.config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
return channel_ring_buffer_file_read(filp, user_buf, count,
ppos, chan, NULL, 1);
else {
struct lib_ring_buffer *buf =
channel_get_ring_buffer(config, chan, 0);
return channel_ring_buffer_file_read(filp, user_buf, count,
ppos, chan, buf, 0);
}
}
static
int lib_ring_buffer_file_open(struct inode *inode, struct file *file)
{
struct lib_ring_buffer *buf = inode->i_private;
int ret;
ret = lib_ring_buffer_iterator_open(buf);
if (ret)
return ret;
file->private_data = buf;
ret = nonseekable_open(inode, file);
if (ret)
goto release_iter;
return 0;
release_iter:
lib_ring_buffer_iterator_release(buf);
return ret;
}
static
int lib_ring_buffer_file_release(struct inode *inode, struct file *file)
{
struct lib_ring_buffer *buf = inode->i_private;
lib_ring_buffer_iterator_release(buf);
return 0;
}
static
int channel_file_open(struct inode *inode, struct file *file)
{
struct channel *chan = inode->i_private;
int ret;
ret = channel_iterator_open(chan);
if (ret)
return ret;
file->private_data = chan;
ret = nonseekable_open(inode, file);
if (ret)
goto release_iter;
return 0;
release_iter:
channel_iterator_release(chan);
return ret;
}
static
int channel_file_release(struct inode *inode, struct file *file)
{
struct channel *chan = inode->i_private;
channel_iterator_release(chan);
return 0;
}
const struct file_operations channel_payload_file_operations = {
.owner = THIS_MODULE,
.open = channel_file_open,
.release = channel_file_release,
.read = channel_file_read,
.llseek = lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(channel_payload_file_operations);
const struct file_operations lib_ring_buffer_payload_file_operations = {
.owner = THIS_MODULE,
.open = lib_ring_buffer_file_open,
.release = lib_ring_buffer_file_release,
.read = lib_ring_buffer_file_read,
.llseek = lib_ring_buffer_no_llseek,
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
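
/*
 * Hypothetical user-space sketch, not part of the original file: one way the
 * payload read() interface above could be consumed.  The file path passed in
 * by the caller is an assumption; only plain POSIX calls are used.  Short
 * reads are fine because the kernel side keeps the position within the
 * current record in *ppos and resumes there on the next read().
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int dump_payload(const char *path)	/* e.g. a per-cpu payload file */
{
	char page[4096];
	ssize_t len;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	/* read() returns 0 once the buffer is finalized (end of trace). */
	while ((len = read(fd, page, sizeof(page))) > 0)
		fwrite(page, 1, len, stdout);
	close(fd);
	return len < 0 ? -1 : 0;
}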

View file

@@ -1,109 +0,0 @@
/*
* ring_buffer_mmap.c
*
* Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
* Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
* Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Re-using content from kernel/relay.c.
*
* This file is released under the GPL v2.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
#include "../../wrapper/ringbuffer/vfs.h"
/*
* fault() vm_op implementation for ring buffer file mapping.
*/
static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct lib_ring_buffer *buf = vma->vm_private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
pgoff_t pgoff = vmf->pgoff;
struct page **page;
void **virt;
unsigned long offset, sb_bindex;
/*
* Verify that faults are only done on the range of pages owned by the
* reader.
*/
offset = pgoff << PAGE_SHIFT;
sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
&& offset < buf->backend.array[sb_bindex]->mmap_offset +
buf->backend.chan->backend.subbuf_size))
return VM_FAULT_SIGBUS;
/*
* ring_buffer_read_get_page() gets the page in the current reader's
* pages.
*/
page = lib_ring_buffer_read_get_page(&buf->backend, offset, &virt);
if (!*page)
return VM_FAULT_SIGBUS;
get_page(*page);
vmf->page = *page;
return 0;
}
/*
* vm_ops for ring buffer file mappings.
*/
static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
.fault = lib_ring_buffer_fault,
};
/**
 * lib_ring_buffer_mmap_buf - mmap channel buffer to process address space
* @buf: ring buffer to map
* @vma: vm_area_struct describing memory to be mapped
*
* Returns 0 if ok, negative on error
*
* Caller should already have grabbed mmap_sem.
*/
static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
struct vm_area_struct *vma)
{
unsigned long length = vma->vm_end - vma->vm_start;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
unsigned long mmap_buf_len;
if (config->output != RING_BUFFER_MMAP)
return -EINVAL;
mmap_buf_len = chan->backend.buf_size;
if (chan->backend.extra_reader_sb)
mmap_buf_len += chan->backend.subbuf_size;
if (length != mmap_buf_len)
return -EINVAL;
vma->vm_ops = &lib_ring_buffer_mmap_ops;
vma->vm_flags |= VM_DONTEXPAND;
vma->vm_private_data = buf;
return 0;
}
/**
* lib_ring_buffer_mmap - mmap file op
* @filp: the file
* @vma: the vma describing what to map
*
* Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
*/
int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct lib_ring_buffer *buf = filp->private_data;
return lib_ring_buffer_mmap_buf(buf, vma);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
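
/*
 * Hypothetical user-space sketch, not part of the original file: mapping a
 * stream opened in mmap output mode.  The mapping length is queried with
 * RING_BUFFER_GET_MMAP_LEN (declared in linux/ringbuffer/vfs.h below); after
 * each successful RING_BUFFER_GET_NEXT_SUBBUF the reader sub-buffer is then
 * located with RING_BUFFER_GET_MMAP_READ_OFFSET.  The function name and the
 * error handling are illustrative only.
 */
#include <sys/mman.h>
#include <sys/ioctl.h>

/* Normally provided by an exported copy of linux/ringbuffer/vfs.h. */
#define RING_BUFFER_GET_MMAP_LEN	_IOR(0xF6, 0x0A, unsigned long)

void *map_stream(int stream_fd, unsigned long *lenp)
{
	unsigned long len;
	void *base;

	if (ioctl(stream_fd, RING_BUFFER_GET_MMAP_LEN, &len) < 0)
		return MAP_FAILED;
	base = mmap(NULL, len, PROT_READ, MAP_PRIVATE, stream_fd, 0);
	if (base != MAP_FAILED)
		*lenp = len;
	return base;
}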

View file

@@ -1,202 +0,0 @@
/*
* ring_buffer_splice.c
*
* Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
* Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
* Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Re-using content from kernel/relay.c.
*
* This file is released under the GPL v2.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include "../../wrapper/splice.h"
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
#include "../../wrapper/ringbuffer/vfs.h"
#if 0
#define printk_dbg(fmt, args...) printk(fmt, args)
#else
#define printk_dbg(fmt, args...)
#endif
loff_t lib_ring_buffer_no_llseek(struct file *file, loff_t offset, int origin)
{
return -ESPIPE;
}
/*
* Release pages from the buffer so splice pipe_to_file can move them.
* Called after the pipe has been populated with buffer pages.
*/
static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *pbuf)
{
__free_page(pbuf->page);
}
static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.confirm = generic_pipe_buf_confirm,
.release = lib_ring_buffer_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
/*
* Page release operation after splice pipe_to_file ends.
*/
static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
unsigned int i)
{
__free_page(spd->pages[i]);
}
/*
* subbuf_splice_actor - splice up to one subbuf's worth of data
*/
static int subbuf_splice_actor(struct file *in,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
struct lib_ring_buffer *buf = in->private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
unsigned int poff, subbuf_pages, nr_pages;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
.pages = pages,
.nr_pages = 0,
.partial = partial,
.flags = flags,
.ops = &ring_buffer_pipe_buf_ops,
.spd_release = lib_ring_buffer_page_release,
};
unsigned long consumed_old, roffset;
unsigned long bytes_avail;
/*
* Check that a GET_SUBBUF ioctl has been done before.
*/
WARN_ON(atomic_long_read(&buf->active_readers) != 1);
consumed_old = lib_ring_buffer_get_consumed(config, buf);
consumed_old += *ppos;
/*
* Adjust read len, if longer than what is available.
* Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
* protection.
*/
bytes_avail = chan->backend.subbuf_size;
WARN_ON(bytes_avail > chan->backend.buf_size);
len = min_t(size_t, len, bytes_avail);
subbuf_pages = bytes_avail >> PAGE_SHIFT;
nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
roffset = consumed_old & PAGE_MASK;
poff = consumed_old & ~PAGE_MASK;
printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));
for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
unsigned int this_len;
struct page **page, *new_page;
void **virt;
if (!len)
break;
printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
len, roffset);
/*
* We have to replace the page we are moving into the splice
* pipe.
*/
new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
0)),
GFP_KERNEL | __GFP_ZERO, 0);
if (!new_page)
break;
this_len = PAGE_SIZE - poff;
page = lib_ring_buffer_read_get_page(&buf->backend, roffset, &virt);
spd.pages[spd.nr_pages] = *page;
*page = new_page;
*virt = page_address(new_page);
spd.partial[spd.nr_pages].offset = poff;
spd.partial[spd.nr_pages].len = this_len;
poff = 0;
roffset += PAGE_SIZE;
len -= this_len;
}
if (!spd.nr_pages)
return 0;
return wrapper_splice_to_pipe(pipe, &spd);
}
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct lib_ring_buffer *buf = in->private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
ssize_t spliced;
int ret;
if (config->output != RING_BUFFER_SPLICE)
return -EINVAL;
/*
* We require ppos and length to be page-aligned for performance reasons
* (no page copy). Size is known using the ioctl
* RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
* We fail when the ppos or len passed is not page-sized, because splice
* is not allowed to copy more than the length passed as parameter (so
* the ABI does not let us silently copy more than requested to include
* padding).
*/
if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
return -EINVAL;
ret = 0;
spliced = 0;
printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
(ssize_t)*ppos);
while (len && !spliced) {
ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
if (ret < 0)
break;
else if (!ret) {
if (flags & SPLICE_F_NONBLOCK)
ret = -EAGAIN;
break;
}
*ppos += ret;
if (ret > len)
len = 0;
else
len -= ret;
spliced += ret;
}
if (spliced)
return spliced;
return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
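
/*
 * Hypothetical user-space sketch, not part of the original file: the splice
 * consumer loop this file is written for.  One call grabs the next ready
 * sub-buffer, queries its padded size, splices it to @out_fd through a pipe
 * created by the caller with pipe(2) (splice(2) needs a pipe on one side),
 * then releases the sub-buffer.  The ioctl request codes are the ones
 * declared in linux/ringbuffer/vfs.h below; the function name is a
 * placeholder.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Normally provided by an exported copy of linux/ringbuffer/vfs.h. */
#define RING_BUFFER_GET_NEXT_SUBBUF		_IO(0xF6, 0x05)
#define RING_BUFFER_PUT_NEXT_SUBBUF		_IO(0xF6, 0x06)
#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE	_IOR(0xF6, 0x08, unsigned long)

int splice_one_subbuf(int stream_fd, int out_fd, const int pipefd[2])
{
	unsigned long padded;
	loff_t in_pos = 0;
	ssize_t ret;

	if (ioctl(stream_fd, RING_BUFFER_GET_NEXT_SUBBUF) < 0)
		return -1;	/* nothing ready, or stream finalized */
	if (ioctl(stream_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &padded) < 0) {
		ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
		return -1;
	}
	while (padded) {
		size_t chunk;

		ret = splice(stream_fd, &in_pos, pipefd[1], NULL, padded,
			     SPLICE_F_MOVE);
		if (ret <= 0)
			break;
		padded -= ret;
		/* Drain the pipe into the output file. */
		for (chunk = ret; chunk; chunk -= ret) {
			ret = splice(pipefd[0], NULL, out_fd, NULL, chunk,
				     SPLICE_F_MOVE);
			if (ret <= 0)
				goto put;
		}
	}
put:
	ioctl(stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
	return padded ? -1 : 0;
}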

View file

@@ -1,390 +0,0 @@
/*
* ring_buffer_vfs.c
*
* Copyright (C) 2009-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Ring Buffer VFS file operations.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include "../../wrapper/ringbuffer/backend.h"
#include "../../wrapper/ringbuffer/frontend.h"
#include "../../wrapper/ringbuffer/vfs.h"
#include "../../wrapper/poll.h"
static int put_ulong(unsigned long val, unsigned long arg)
{
return put_user(val, (unsigned long __user *)arg);
}
#ifdef CONFIG_COMPAT
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
#endif
/**
* lib_ring_buffer_open - ring buffer open file operation
* @inode: opened inode
* @file: opened file
*
* Open implementation. Makes sure only one open instance of a buffer is
* done at a given moment.
*/
int lib_ring_buffer_open(struct inode *inode, struct file *file)
{
struct lib_ring_buffer *buf = inode->i_private;
int ret;
if (!buf)
return -EINVAL;
ret = lib_ring_buffer_open_read(buf);
if (ret)
return ret;
file->private_data = buf;
ret = nonseekable_open(inode, file);
if (ret)
goto release_read;
return 0;
release_read:
lib_ring_buffer_release_read(buf);
return ret;
}
/**
* lib_ring_buffer_release - ring buffer release file operation
* @inode: opened inode
* @file: opened file
*
* Release implementation.
*/
int lib_ring_buffer_release(struct inode *inode, struct file *file)
{
struct lib_ring_buffer *buf = file->private_data;
lib_ring_buffer_release_read(buf);
return 0;
}
/**
* lib_ring_buffer_poll - ring buffer poll file operation
* @filp: the file
* @wait: poll table
*
* Poll implementation.
*/
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
{
unsigned int mask = 0;
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
int finalized, disabled;
if (filp->f_mode & FMODE_READ) {
poll_wait_set_exclusive(wait);
poll_wait(filp, &buf->read_wait, wait);
finalized = lib_ring_buffer_is_finalized(config, buf);
disabled = lib_ring_buffer_channel_is_disabled(chan);
/*
* lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
* finalized load before offsets loads.
*/
WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
if (disabled)
return POLLERR;
if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
- subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
== 0) {
if (finalized)
return POLLHUP;
else {
/*
* The memory barriers
* __wait_event()/wake_up_interruptible() take
* care of "raw_spin_is_locked" memory ordering.
*/
if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
goto retry;
else
return 0;
}
} else {
if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
chan)
- subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
chan)
>= chan->backend.buf_size)
return POLLPRI | POLLRDBAND;
else
return POLLIN | POLLRDNORM;
}
}
return mask;
}
/**
* lib_ring_buffer_ioctl - control ring buffer reader synchronization
*
* @filp: the file
* @cmd: the command
* @arg: command arg
*
* This ioctl implements commands necessary for producer/consumer
* and flight recorder reader interaction :
* RING_BUFFER_GET_NEXT_SUBBUF
* Get the next sub-buffer that can be read. It never blocks.
* RING_BUFFER_PUT_NEXT_SUBBUF
* Release the currently read sub-buffer.
* RING_BUFFER_GET_SUBBUF_SIZE
* returns the size of the current sub-buffer.
* RING_BUFFER_GET_MAX_SUBBUF_SIZE
* returns the maximum size for sub-buffers.
 *	RING_BUFFER_GET_MMAP_LEN
 *		returns the length to mmap for the per-cpu buffer
 *		(for mmap clients).
* RING_BUFFER_GET_MMAP_READ_OFFSET
* returns the offset of the subbuffer belonging to the reader.
* Should only be used for mmap clients.
*/
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
switch (cmd) {
case RING_BUFFER_SNAPSHOT:
return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
&buf->prod_snapshot);
case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
return put_ulong(buf->cons_snapshot, arg);
case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
return put_ulong(buf->prod_snapshot, arg);
case RING_BUFFER_GET_SUBBUF:
{
unsigned long uconsume;
long ret;
ret = get_user(uconsume, (unsigned long __user *) arg);
if (ret)
return ret; /* will return -EFAULT */
ret = lib_ring_buffer_get_subbuf(buf, uconsume);
if (!ret) {
/* Set file position to zero at each successful "get" */
filp->f_pos = 0;
}
return ret;
}
case RING_BUFFER_PUT_SUBBUF:
lib_ring_buffer_put_subbuf(buf);
return 0;
case RING_BUFFER_GET_NEXT_SUBBUF:
{
long ret;
ret = lib_ring_buffer_get_next_subbuf(buf);
if (!ret) {
/* Set file position to zero at each successful "get" */
filp->f_pos = 0;
}
return ret;
}
case RING_BUFFER_PUT_NEXT_SUBBUF:
lib_ring_buffer_put_next_subbuf(buf);
return 0;
case RING_BUFFER_GET_SUBBUF_SIZE:
return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
arg);
case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
{
unsigned long size;
size = lib_ring_buffer_get_read_data_size(config, buf);
size = PAGE_ALIGN(size);
return put_ulong(size, arg);
}
case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
return put_ulong(chan->backend.subbuf_size, arg);
case RING_BUFFER_GET_MMAP_LEN:
{
unsigned long mmap_buf_len;
if (config->output != RING_BUFFER_MMAP)
return -EINVAL;
mmap_buf_len = chan->backend.buf_size;
if (chan->backend.extra_reader_sb)
mmap_buf_len += chan->backend.subbuf_size;
if (mmap_buf_len > INT_MAX)
return -EFBIG;
return put_ulong(mmap_buf_len, arg);
}
case RING_BUFFER_GET_MMAP_READ_OFFSET:
{
unsigned long sb_bindex;
if (config->output != RING_BUFFER_MMAP)
return -EINVAL;
sb_bindex = subbuffer_id_get_index(config,
buf->backend.buf_rsb.id);
return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
arg);
}
case RING_BUFFER_FLUSH:
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
return 0;
default:
return -ENOIOCTLCMD;
}
}
#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct lib_ring_buffer *buf = filp->private_data;
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = chan->backend.config;
if (lib_ring_buffer_channel_is_disabled(chan))
return -EIO;
switch (cmd) {
case RING_BUFFER_SNAPSHOT:
return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
&buf->prod_snapshot);
case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
return compat_put_ulong(buf->cons_snapshot, arg);
case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
return compat_put_ulong(buf->prod_snapshot, arg);
case RING_BUFFER_GET_SUBBUF:
{
__u32 uconsume;
unsigned long consume;
long ret;
ret = get_user(uconsume, (__u32 __user *) arg);
if (ret)
return ret; /* will return -EFAULT */
consume = buf->cons_snapshot;
consume &= ~0xFFFFFFFFL;
consume |= uconsume;
ret = lib_ring_buffer_get_subbuf(buf, consume);
if (!ret) {
/* Set file position to zero at each successful "get" */
filp->f_pos = 0;
}
return ret;
}
case RING_BUFFER_PUT_SUBBUF:
lib_ring_buffer_put_subbuf(buf);
return 0;
case RING_BUFFER_GET_NEXT_SUBBUF:
{
long ret;
ret = lib_ring_buffer_get_next_subbuf(buf);
if (!ret) {
/* Set file position to zero at each successful "get" */
filp->f_pos = 0;
}
return ret;
}
case RING_BUFFER_PUT_NEXT_SUBBUF:
lib_ring_buffer_put_next_subbuf(buf);
return 0;
case RING_BUFFER_GET_SUBBUF_SIZE:
{
unsigned long data_size;
data_size = lib_ring_buffer_get_read_data_size(config, buf);
if (data_size > UINT_MAX)
return -EFBIG;
		return compat_put_ulong(data_size, arg);
}
case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
{
unsigned long size;
size = lib_ring_buffer_get_read_data_size(config, buf);
size = PAGE_ALIGN(size);
if (size > UINT_MAX)
return -EFBIG;
		return compat_put_ulong(size, arg);
}
case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
if (chan->backend.subbuf_size > UINT_MAX)
return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
case RING_BUFFER_GET_MMAP_LEN:
{
unsigned long mmap_buf_len;
if (config->output != RING_BUFFER_MMAP)
return -EINVAL;
mmap_buf_len = chan->backend.buf_size;
if (chan->backend.extra_reader_sb)
mmap_buf_len += chan->backend.subbuf_size;
if (mmap_buf_len > UINT_MAX)
return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
}
case RING_BUFFER_GET_MMAP_READ_OFFSET:
{
unsigned long sb_bindex, read_offset;
if (config->output != RING_BUFFER_MMAP)
return -EINVAL;
sb_bindex = subbuffer_id_get_index(config,
buf->backend.buf_rsb.id);
read_offset = buf->backend.array[sb_bindex]->mmap_offset;
if (read_offset > UINT_MAX)
return -EINVAL;
		return compat_put_ulong(read_offset, arg);
}
case RING_BUFFER_FLUSH:
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
return 0;
default:
return -ENOIOCTLCMD;
}
}
#endif
const struct file_operations lib_ring_buffer_file_operations = {
.owner = THIS_MODULE,
.open = lib_ring_buffer_open,
.release = lib_ring_buffer_release,
.poll = lib_ring_buffer_poll,
.splice_read = lib_ring_buffer_splice_read,
.mmap = lib_ring_buffer_mmap,
.unlocked_ioctl = lib_ring_buffer_ioctl,
.llseek = lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
.compat_ioctl = lib_ring_buffer_compat_ioctl,
#endif
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Ring Buffer Library VFS");

View file

@@ -1,85 +0,0 @@
#ifndef _LINUX_RING_BUFFER_VATOMIC_H
#define _LINUX_RING_BUFFER_VATOMIC_H
/*
* linux/ringbuffer/vatomic.h
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <asm/atomic.h>
#include <asm/local.h>
/*
* Same data type (long) accessed differently depending on configuration.
* v field is for non-atomic access (protected by mutual exclusion).
* In the fast-path, the ring_buffer_config structure is constant, so the
* compiler can statically select the appropriate branch.
* local_t is used for per-cpu and per-thread buffers.
* atomic_long_t is used for globally shared buffers.
*/
union v_atomic {
local_t l;
atomic_long_t a;
long v;
};
static inline
long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
return local_read(&v_a->l);
else
return atomic_long_read(&v_a->a);
}
static inline
void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
long v)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
local_set(&v_a->l, v);
else
atomic_long_set(&v_a->a, v);
}
static inline
void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
local_add(v, &v_a->l);
else
atomic_long_add(v, &v_a->a);
}
static inline
void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
local_inc(&v_a->l);
else
atomic_long_inc(&v_a->a);
}
/*
* Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
*/
static inline
void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
{
--v_a->v;
}
static inline
long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
long old, long _new)
{
if (config->sync == RING_BUFFER_SYNC_PER_CPU)
return local_cmpxchg(&v_a->l, old, _new);
else
return atomic_long_cmpxchg(&v_a->a, old, _new);
}
#endif /* _LINUX_RING_BUFFER_VATOMIC_H */

View file

@@ -1,89 +0,0 @@
#ifndef _LINUX_RING_BUFFER_VFS_H
#define _LINUX_RING_BUFFER_VFS_H
/*
* linux/ringbuffer/vfs.h
*
* (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Wait-free ring buffer VFS file operations.
*
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/fs.h>
#include <linux/poll.h>
/* VFS API */
extern const struct file_operations lib_ring_buffer_file_operations;
/*
* Internal file operations.
*/
int lib_ring_buffer_open(struct inode *inode, struct file *file);
int lib_ring_buffer_release(struct inode *inode, struct file *file);
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait);
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma);
/* Ring Buffer ioctl() and ioctl numbers */
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
/*
* Use RING_BUFFER_GET_NEXT_SUBBUF / RING_BUFFER_PUT_NEXT_SUBBUF to read and
* consume sub-buffers sequentially.
*
* Reading sub-buffers without consuming them can be performed with:
*
* RING_BUFFER_SNAPSHOT
* RING_BUFFER_SNAPSHOT_GET_CONSUMED
* RING_BUFFER_SNAPSHOT_GET_PRODUCED
*
* to get the offset range to consume, and then by passing each sub-buffer
* offset to RING_BUFFER_GET_SUBBUF, read the sub-buffer, and then release it
* with RING_BUFFER_PUT_SUBBUF.
*
* Note that the "snapshot" API can be used to read the sub-buffer in reverse
* order, which is useful for flight recorder snapshots.
*/
/* Get a snapshot of the current ring buffer producer and consumer positions */
#define RING_BUFFER_SNAPSHOT _IO(0xF6, 0x00)
/* Get the consumer position (iteration start) */
#define RING_BUFFER_SNAPSHOT_GET_CONSUMED _IOR(0xF6, 0x01, unsigned long)
/* Get the producer position (iteration end) */
#define RING_BUFFER_SNAPSHOT_GET_PRODUCED _IOR(0xF6, 0x02, unsigned long)
/* Get exclusive read access to the specified sub-buffer position */
#define RING_BUFFER_GET_SUBBUF _IOW(0xF6, 0x03, unsigned long)
/* Release exclusive sub-buffer access */
#define RING_BUFFER_PUT_SUBBUF _IO(0xF6, 0x04)
/* Get exclusive read access to the next sub-buffer that can be read. */
#define RING_BUFFER_GET_NEXT_SUBBUF _IO(0xF6, 0x05)
/* Release exclusive sub-buffer access, move consumer forward. */
#define RING_BUFFER_PUT_NEXT_SUBBUF _IO(0xF6, 0x06)
/* returns the size of the current sub-buffer, without padding (for mmap). */
#define RING_BUFFER_GET_SUBBUF_SIZE _IOR(0xF6, 0x07, unsigned long)
/* returns the size of the current sub-buffer, with padding (for splice). */
#define RING_BUFFER_GET_PADDED_SUBBUF_SIZE _IOR(0xF6, 0x08, unsigned long)
/* returns the maximum size for sub-buffers. */
#define RING_BUFFER_GET_MAX_SUBBUF_SIZE _IOR(0xF6, 0x09, unsigned long)
/* returns the length to mmap. */
#define RING_BUFFER_GET_MMAP_LEN _IOR(0xF6, 0x0A, unsigned long)
/* returns the offset of the subbuffer belonging to the mmap reader. */
#define RING_BUFFER_GET_MMAP_READ_OFFSET _IOR(0xF6, 0x0B, unsigned long)
/* flush the current sub-buffer */
#define RING_BUFFER_FLUSH _IO(0xF6, 0x0C)
#endif /* _LINUX_RING_BUFFER_VFS_H */
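
/*
 * Hypothetical user-space sketch, not part of the original header: the
 * flight-recorder ("snapshot") read sequence described above.  The request
 * codes are the ones defined in this header (they would normally come from
 * an exported copy of it); @subbuf_size and the dump() callback are
 * placeholders, and counter wrap-around is ignored for brevity.
 */
#include <sys/ioctl.h>

#define RING_BUFFER_SNAPSHOT			_IO(0xF6, 0x00)
#define RING_BUFFER_SNAPSHOT_GET_CONSUMED	_IOR(0xF6, 0x01, unsigned long)
#define RING_BUFFER_SNAPSHOT_GET_PRODUCED	_IOR(0xF6, 0x02, unsigned long)
#define RING_BUFFER_GET_SUBBUF			_IOW(0xF6, 0x03, unsigned long)
#define RING_BUFFER_PUT_SUBBUF			_IO(0xF6, 0x04)

int snapshot_walk(int stream_fd, unsigned long subbuf_size,
		  int (*dump)(int stream_fd, unsigned long pos))
{
	unsigned long consumed, produced, pos;

	if (ioctl(stream_fd, RING_BUFFER_SNAPSHOT) < 0)
		return -1;
	if (ioctl(stream_fd, RING_BUFFER_SNAPSHOT_GET_CONSUMED, &consumed) < 0)
		return -1;
	if (ioctl(stream_fd, RING_BUFFER_SNAPSHOT_GET_PRODUCED, &produced) < 0)
		return -1;
	for (pos = consumed; pos < produced; pos += subbuf_size) {
		if (ioctl(stream_fd, RING_BUFFER_GET_SUBBUF, &pos) < 0)
			continue;	/* sub-buffer was overwritten, skip it */
		dump(stream_fd, pos);	/* e.g. read it through the mmap view */
		ioctl(stream_fd, RING_BUFFER_PUT_SUBBUF);
	}
	return 0;
}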

View file

@@ -1,93 +0,0 @@
/*
* ltt-context.c
*
* Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng trace/channel/event context management.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "ltt-events.h"
#include "ltt-tracer.h"
int lttng_find_context(struct lttng_ctx *ctx, const char *name)
{
unsigned int i;
for (i = 0; i < ctx->nr_fields; i++) {
/* Skip allocated (but non-initialized) contexts */
if (!ctx->fields[i].event_field.name)
continue;
if (!strcmp(ctx->fields[i].event_field.name, name))
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(lttng_find_context);
/*
* Note: as we append context information, the pointer location may change.
*/
struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx_p)
{
struct lttng_ctx_field *field;
struct lttng_ctx *ctx;
if (!*ctx_p) {
*ctx_p = kzalloc(sizeof(struct lttng_ctx), GFP_KERNEL);
if (!*ctx_p)
return NULL;
}
ctx = *ctx_p;
if (ctx->nr_fields + 1 > ctx->allocated_fields) {
struct lttng_ctx_field *new_fields;
ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
new_fields = kzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
if (!new_fields)
return NULL;
if (ctx->fields)
memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
kfree(ctx->fields);
ctx->fields = new_fields;
}
field = &ctx->fields[ctx->nr_fields];
ctx->nr_fields++;
return field;
}
EXPORT_SYMBOL_GPL(lttng_append_context);
/*
* Remove last context field.
*/
void lttng_remove_context_field(struct lttng_ctx **ctx_p,
struct lttng_ctx_field *field)
{
struct lttng_ctx *ctx;
ctx = *ctx_p;
ctx->nr_fields--;
WARN_ON_ONCE(&ctx->fields[ctx->nr_fields] != field);
memset(&ctx->fields[ctx->nr_fields], 0, sizeof(struct lttng_ctx_field));
}
EXPORT_SYMBOL_GPL(lttng_remove_context_field);
void lttng_destroy_context(struct lttng_ctx *ctx)
{
int i;
if (!ctx)
return;
for (i = 0; i < ctx->nr_fields; i++) {
if (ctx->fields[i].destroy)
ctx->fields[i].destroy(&ctx->fields[i]);
}
kfree(ctx->fields);
kfree(ctx);
}
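
/*
 * Hypothetical sketch, not part of the original file: how a context module
 * built on the helpers above typically appends one field (modelled on the
 * PID context).  The ring-buffer alignment helpers, ltt_alignof() and the
 * event_write channel operation are assumed to be available from the rest of
 * the tree; the "example_pid" name is a placeholder.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"

static size_t example_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
	size += sizeof(pid_t);
	return size;
}

static void example_record(struct lttng_ctx_field *field,
			   struct lib_ring_buffer_ctx *ctx,
			   struct ltt_channel *chan)
{
	pid_t pid = task_tgid_nr(current);

	lib_ring_buffer_align_ctx(ctx, ltt_alignof(pid));
	chan->ops->event_write(ctx, &pid, sizeof(pid));
}

int lttng_add_example_to_ctx(struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;

	/* Append first: lttng_find_context() needs an allocated *ctx. */
	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, "example_pid")) {
		lttng_remove_context_field(ctx, field);
		return -EEXIST;
	}
	field->event_field.name = "example_pid";
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = example_get_size;
	field->record = example_record;
	field->destroy = NULL;		/* nothing to tear down */
	wrapper_vmalloc_sync_all();
	return 0;
}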

View file

@@ -1,777 +0,0 @@
/*
* ltt-debugfs-abi.c
*
* Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng debugfs ABI
*
* Mimic system calls for:
* - session creation, returns a file descriptor or failure.
* - channel creation, returns a file descriptor or failure.
* - Operates on a session file descriptor
* - Takes all channel options as parameters.
* - stream get, returns a file descriptor or failure.
* - Operates on a channel file descriptor.
* - stream notifier get, returns a file descriptor or failure.
* - Operates on a channel file descriptor.
* - event creation, returns a file descriptor or failure.
* - Operates on a channel file descriptor
* - Takes an event name as parameter
* - Takes an instrumentation source as parameter
* - e.g. tracepoints, dynamic_probes...
* - Takes instrumentation source specific arguments.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/ringbuffer/vfs.h"
#include "wrapper/poll.h"
#include "ltt-debugfs-abi.h"
#include "ltt-events.h"
#include "ltt-tracer.h"
/*
* This is LTTng's own personal way to create a system call as an external
* module. We use ioctl() on /sys/kernel/debug/lttng.
*/
static struct dentry *lttng_dentry;
static struct proc_dir_entry *lttng_proc_dentry;
static const struct file_operations lttng_fops;
static const struct file_operations lttng_session_fops;
static const struct file_operations lttng_channel_fops;
static const struct file_operations lttng_metadata_fops;
static const struct file_operations lttng_event_fops;
/*
* Teardown management: opened file descriptors keep a refcount on the module,
* so it can only exit when all file descriptors are closed.
*/
enum channel_type {
PER_CPU_CHANNEL,
METADATA_CHANNEL,
};
static
int lttng_abi_create_session(void)
{
struct ltt_session *session;
struct file *session_file;
int session_fd, ret;
session = ltt_session_create();
if (!session)
return -ENOMEM;
session_fd = get_unused_fd();
if (session_fd < 0) {
ret = session_fd;
goto fd_error;
}
session_file = anon_inode_getfile("[lttng_session]",
&lttng_session_fops,
session, O_RDWR);
if (IS_ERR(session_file)) {
ret = PTR_ERR(session_file);
goto file_error;
}
session->file = session_file;
fd_install(session_fd, session_file);
return session_fd;
file_error:
put_unused_fd(session_fd);
fd_error:
ltt_session_destroy(session);
return ret;
}
static
int lttng_abi_tracepoint_list(void)
{
struct file *tracepoint_list_file;
int file_fd, ret;
file_fd = get_unused_fd();
if (file_fd < 0) {
ret = file_fd;
goto fd_error;
}
tracepoint_list_file = anon_inode_getfile("[lttng_session]",
&lttng_tracepoint_list_fops,
NULL, O_RDWR);
if (IS_ERR(tracepoint_list_file)) {
ret = PTR_ERR(tracepoint_list_file);
goto file_error;
}
ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
if (ret < 0)
goto open_error;
fd_install(file_fd, tracepoint_list_file);
return file_fd;
open_error:
fput(tracepoint_list_file);
file_error:
put_unused_fd(file_fd);
fd_error:
return ret;
}
static
long lttng_abi_tracer_version(struct file *file,
struct lttng_kernel_tracer_version __user *uversion_param)
{
struct lttng_kernel_tracer_version v;
v.version = LTTNG_VERSION;
v.patchlevel = LTTNG_PATCHLEVEL;
v.sublevel = LTTNG_SUBLEVEL;
if (copy_to_user(uversion_param, &v, sizeof(v)))
return -EFAULT;
return 0;
}
static
long lttng_abi_add_context(struct file *file,
struct lttng_kernel_context __user *ucontext_param,
struct lttng_ctx **ctx, struct ltt_session *session)
{
struct lttng_kernel_context context_param;
if (session->been_active)
return -EPERM;
if (copy_from_user(&context_param, ucontext_param, sizeof(context_param)))
return -EFAULT;
switch (context_param.ctx) {
case LTTNG_KERNEL_CONTEXT_PID:
return lttng_add_pid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_PRIO:
return lttng_add_prio_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_NICE:
return lttng_add_nice_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_VPID:
return lttng_add_vpid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_TID:
return lttng_add_tid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_VTID:
return lttng_add_vtid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_PPID:
return lttng_add_ppid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_VPPID:
return lttng_add_vppid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
context_param.u.perf_counter.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
return lttng_add_perf_counter_to_ctx(context_param.u.perf_counter.type,
context_param.u.perf_counter.config,
context_param.u.perf_counter.name,
ctx);
case LTTNG_KERNEL_CONTEXT_PROCNAME:
return lttng_add_procname_to_ctx(ctx);
default:
return -EINVAL;
}
}
/**
* lttng_ioctl - lttng syscall through ioctl
*
* @file: the file
* @cmd: the command
* @arg: command arg
*
* This ioctl implements lttng commands:
* LTTNG_KERNEL_SESSION
* Returns a LTTng trace session file descriptor
* LTTNG_KERNEL_TRACER_VERSION
* Returns the LTTng kernel tracer version
* LTTNG_KERNEL_TRACEPOINT_LIST
* Returns a file descriptor listing available tracepoints
* LTTNG_KERNEL_WAIT_QUIESCENT
* Returns after all previously running probes have completed
*
* The returned session will be deleted when its file descriptor is closed.
*/
static
long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case LTTNG_KERNEL_SESSION:
return lttng_abi_create_session();
case LTTNG_KERNEL_TRACER_VERSION:
return lttng_abi_tracer_version(file,
(struct lttng_kernel_tracer_version __user *) arg);
case LTTNG_KERNEL_TRACEPOINT_LIST:
return lttng_abi_tracepoint_list();
case LTTNG_KERNEL_WAIT_QUIESCENT:
synchronize_trace();
return 0;
case LTTNG_KERNEL_CALIBRATE:
{
struct lttng_kernel_calibrate __user *ucalibrate =
(struct lttng_kernel_calibrate __user *) arg;
struct lttng_kernel_calibrate calibrate;
int ret;
if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
return -EFAULT;
ret = lttng_calibrate(&calibrate);
if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
return -EFAULT;
return ret;
}
default:
return -ENOIOCTLCMD;
}
}
static const struct file_operations lttng_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = lttng_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_ioctl,
#endif
};
/*
* We tolerate no failure in this function (if one happens, we print a dmesg
* error, but cannot return any error, because the channel information is
* invariant.
*/
static
void lttng_metadata_create_events(struct file *channel_file)
{
struct ltt_channel *channel = channel_file->private_data;
static struct lttng_kernel_event metadata_params = {
.instrumentation = LTTNG_KERNEL_TRACEPOINT,
.name = "lttng_metadata",
};
struct ltt_event *event;
/*
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
event = ltt_event_create(channel, &metadata_params, NULL, NULL);
if (!event) {
goto create_error;
}
return;
create_error:
WARN_ON(1);
return; /* not allowed to return error */
}
static
int lttng_abi_create_channel(struct file *session_file,
struct lttng_kernel_channel __user *uchan_param,
enum channel_type channel_type)
{
struct ltt_session *session = session_file->private_data;
const struct file_operations *fops = NULL;
const char *transport_name;
struct ltt_channel *chan;
struct file *chan_file;
struct lttng_kernel_channel chan_param;
int chan_fd;
int ret = 0;
if (copy_from_user(&chan_param, uchan_param, sizeof(chan_param)))
return -EFAULT;
chan_fd = get_unused_fd();
if (chan_fd < 0) {
ret = chan_fd;
goto fd_error;
}
switch (channel_type) {
case PER_CPU_CHANNEL:
fops = &lttng_channel_fops;
break;
case METADATA_CHANNEL:
fops = &lttng_metadata_fops;
break;
}
chan_file = anon_inode_getfile("[lttng_channel]",
fops,
NULL, O_RDWR);
if (IS_ERR(chan_file)) {
ret = PTR_ERR(chan_file);
goto file_error;
}
switch (channel_type) {
case PER_CPU_CHANNEL:
if (chan_param.output == LTTNG_KERNEL_SPLICE) {
transport_name = chan_param.overwrite ?
"relay-overwrite" : "relay-discard";
} else if (chan_param.output == LTTNG_KERNEL_MMAP) {
transport_name = chan_param.overwrite ?
"relay-overwrite-mmap" : "relay-discard-mmap";
		} else {
			ret = -EINVAL;
			goto chan_error;
		}
break;
case METADATA_CHANNEL:
if (chan_param.output == LTTNG_KERNEL_SPLICE)
transport_name = "relay-metadata";
else if (chan_param.output == LTTNG_KERNEL_MMAP)
transport_name = "relay-metadata-mmap";
		else {
			ret = -EINVAL;
			goto chan_error;
		}
break;
default:
transport_name = "<unknown>";
break;
}
/*
* We tolerate no failure path after channel creation. It will stay
* invariant for the rest of the session.
*/
chan = ltt_channel_create(session, transport_name, NULL,
chan_param.subbuf_size,
chan_param.num_subbuf,
chan_param.switch_timer_interval,
chan_param.read_timer_interval);
if (!chan) {
ret = -EINVAL;
goto chan_error;
}
chan->file = chan_file;
chan_file->private_data = chan;
fd_install(chan_fd, chan_file);
if (channel_type == METADATA_CHANNEL) {
session->metadata = chan;
lttng_metadata_create_events(chan_file);
}
/* The channel created holds a reference on the session */
atomic_long_inc(&session_file->f_count);
return chan_fd;
chan_error:
fput(chan_file);
file_error:
put_unused_fd(chan_fd);
fd_error:
return ret;
}
/**
* lttng_session_ioctl - lttng session fd ioctl
*
* @file: the file
* @cmd: the command
* @arg: command arg
*
* This ioctl implements lttng commands:
* LTTNG_KERNEL_CHANNEL
* Returns a LTTng channel file descriptor
* LTTNG_KERNEL_ENABLE
* Enables tracing for a session (weak enable)
* LTTNG_KERNEL_DISABLE
* Disables tracing for a session (strong disable)
* LTTNG_KERNEL_METADATA
* Returns a LTTng metadata file descriptor
*
* The returned channel will be deleted when its file descriptor is closed.
*/
static
long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ltt_session *session = file->private_data;
switch (cmd) {
case LTTNG_KERNEL_CHANNEL:
return lttng_abi_create_channel(file,
(struct lttng_kernel_channel __user *) arg,
PER_CPU_CHANNEL);
case LTTNG_KERNEL_SESSION_START:
case LTTNG_KERNEL_ENABLE:
return ltt_session_enable(session);
case LTTNG_KERNEL_SESSION_STOP:
case LTTNG_KERNEL_DISABLE:
return ltt_session_disable(session);
case LTTNG_KERNEL_METADATA:
return lttng_abi_create_channel(file,
(struct lttng_kernel_channel __user *) arg,
METADATA_CHANNEL);
default:
return -ENOIOCTLCMD;
}
}
/*
* Called when the last file reference is dropped.
*
* Big fat note: channels and events are invariant for the whole session after
* their creation. So this session destruction also destroys all channel and
* event structures specific to this session (they are not destroyed when their
* individual file is released).
*/
static
int lttng_session_release(struct inode *inode, struct file *file)
{
struct ltt_session *session = file->private_data;
if (session)
ltt_session_destroy(session);
return 0;
}
static const struct file_operations lttng_session_fops = {
.owner = THIS_MODULE,
.release = lttng_session_release,
.unlocked_ioctl = lttng_session_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_session_ioctl,
#endif
};
static
int lttng_abi_open_stream(struct file *channel_file)
{
struct ltt_channel *channel = channel_file->private_data;
struct lib_ring_buffer *buf;
int stream_fd, ret;
struct file *stream_file;
buf = channel->ops->buffer_read_open(channel->chan);
if (!buf)
return -ENOENT;
stream_fd = get_unused_fd();
if (stream_fd < 0) {
ret = stream_fd;
goto fd_error;
}
stream_file = anon_inode_getfile("[lttng_stream]",
&lib_ring_buffer_file_operations,
buf, O_RDWR);
if (IS_ERR(stream_file)) {
ret = PTR_ERR(stream_file);
goto file_error;
}
/*
	 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, does not honor
* FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
* file descriptor, so we set FMODE_PREAD here.
*/
stream_file->f_mode |= FMODE_PREAD;
fd_install(stream_fd, stream_file);
/*
* The stream holds a reference to the channel within the generic ring
* buffer library, so no need to hold a refcount on the channel and
* session files here.
*/
return stream_fd;
file_error:
put_unused_fd(stream_fd);
fd_error:
channel->ops->buffer_read_close(buf);
return ret;
}
static
int lttng_abi_create_event(struct file *channel_file,
struct lttng_kernel_event __user *uevent_param)
{
struct ltt_channel *channel = channel_file->private_data;
struct ltt_event *event;
struct lttng_kernel_event event_param;
int event_fd, ret;
struct file *event_file;
if (copy_from_user(&event_param, uevent_param, sizeof(event_param)))
return -EFAULT;
event_param.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
switch (event_param.instrumentation) {
case LTTNG_KERNEL_KRETPROBE:
event_param.u.kretprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
break;
case LTTNG_KERNEL_KPROBE:
event_param.u.kprobe.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
break;
case LTTNG_KERNEL_FUNCTION:
event_param.u.ftrace.symbol_name[LTTNG_SYM_NAME_LEN - 1] = '\0';
break;
default:
break;
}
switch (event_param.instrumentation) {
default:
event_fd = get_unused_fd();
if (event_fd < 0) {
ret = event_fd;
goto fd_error;
}
event_file = anon_inode_getfile("[lttng_event]",
&lttng_event_fops,
NULL, O_RDWR);
if (IS_ERR(event_file)) {
ret = PTR_ERR(event_file);
goto file_error;
}
/*
* We tolerate no failure path after event creation. It
* will stay invariant for the rest of the session.
*/
event = ltt_event_create(channel, &event_param, NULL, NULL);
if (!event) {
ret = -EINVAL;
goto event_error;
}
event_file->private_data = event;
fd_install(event_fd, event_file);
/* The event holds a reference on the channel */
atomic_long_inc(&channel_file->f_count);
break;
case LTTNG_KERNEL_SYSCALL:
/*
* Only all-syscall tracing supported for now.
*/
if (event_param.name[0] != '\0')
return -EINVAL;
ret = lttng_syscalls_register(channel, NULL);
if (ret)
goto fd_error;
event_fd = 0;
break;
}
return event_fd;
event_error:
fput(event_file);
file_error:
put_unused_fd(event_fd);
fd_error:
return ret;
}
/**
* lttng_channel_ioctl - lttng syscall through ioctl
*
* @file: the file
* @cmd: the command
* @arg: command arg
*
* This ioctl implements lttng commands:
* LTTNG_KERNEL_STREAM
* Returns an event stream file descriptor or failure.
* (typically, one event stream records events from one CPU)
* LTTNG_KERNEL_EVENT
* Returns an event file descriptor or failure.
* LTTNG_KERNEL_CONTEXT
* Prepend a context field to each event in the channel
* LTTNG_KERNEL_ENABLE
* Enable recording for events in this channel (weak enable)
* LTTNG_KERNEL_DISABLE
* Disable recording for events in this channel (strong disable)
*
* Channel and event file descriptors also hold a reference on the session.
*/
static
long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ltt_channel *channel = file->private_data;
switch (cmd) {
case LTTNG_KERNEL_STREAM:
return lttng_abi_open_stream(file);
case LTTNG_KERNEL_EVENT:
return lttng_abi_create_event(file, (struct lttng_kernel_event __user *) arg);
case LTTNG_KERNEL_CONTEXT:
return lttng_abi_add_context(file,
(struct lttng_kernel_context __user *) arg,
&channel->ctx, channel->session);
case LTTNG_KERNEL_ENABLE:
return ltt_channel_enable(channel);
case LTTNG_KERNEL_DISABLE:
return ltt_channel_disable(channel);
default:
return -ENOIOCTLCMD;
}
}
/**
* lttng_metadata_ioctl - lttng syscall through ioctl
*
* @file: the file
* @cmd: the command
* @arg: command arg
*
* This ioctl implements lttng commands:
* LTTNG_KERNEL_STREAM
* Returns an event stream file descriptor or failure.
*
* Channel and event file descriptors also hold a reference on the session.
*/
static
long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case LTTNG_KERNEL_STREAM:
return lttng_abi_open_stream(file);
default:
return -ENOIOCTLCMD;
}
}
/**
* lttng_channel_poll - lttng stream addition/removal monitoring
*
* @file: the file
* @wait: poll table
*/
unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
{
struct ltt_channel *channel = file->private_data;
unsigned int mask = 0;
if (file->f_mode & FMODE_READ) {
poll_wait_set_exclusive(wait);
poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
wait);
if (channel->ops->is_disabled(channel->chan))
return POLLERR;
if (channel->ops->is_finalized(channel->chan))
return POLLHUP;
if (channel->ops->buffer_has_read_closed_stream(channel->chan))
return POLLIN | POLLRDNORM;
return 0;
}
return mask;
}
static
int lttng_channel_release(struct inode *inode, struct file *file)
{
struct ltt_channel *channel = file->private_data;
if (channel)
fput(channel->session->file);
return 0;
}
static const struct file_operations lttng_channel_fops = {
.owner = THIS_MODULE,
.release = lttng_channel_release,
.poll = lttng_channel_poll,
.unlocked_ioctl = lttng_channel_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_channel_ioctl,
#endif
};
static const struct file_operations lttng_metadata_fops = {
.owner = THIS_MODULE,
.release = lttng_channel_release,
.unlocked_ioctl = lttng_metadata_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_metadata_ioctl,
#endif
};
/**
* lttng_event_ioctl - lttng syscall through ioctl
*
* @file: the file
* @cmd: the command
* @arg: command arg
*
* This ioctl implements lttng commands:
* LTTNG_KERNEL_CONTEXT
* Prepend a context field to each record of this event
* LTTNG_KERNEL_ENABLE
* Enable recording for this event (weak enable)
* LTTNG_KERNEL_DISABLE
* Disable recording for this event (strong disable)
*/
static
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ltt_event *event = file->private_data;
switch (cmd) {
case LTTNG_KERNEL_CONTEXT:
return lttng_abi_add_context(file,
(struct lttng_kernel_context __user *) arg,
&event->ctx, event->chan->session);
case LTTNG_KERNEL_ENABLE:
return ltt_event_enable(event);
case LTTNG_KERNEL_DISABLE:
return ltt_event_disable(event);
default:
return -ENOIOCTLCMD;
}
}
static
int lttng_event_release(struct inode *inode, struct file *file)
{
struct ltt_event *event = file->private_data;
if (event)
fput(event->chan->file);
return 0;
}
/* TODO: filter control ioctl */
static const struct file_operations lttng_event_fops = {
.owner = THIS_MODULE,
.release = lttng_event_release,
.unlocked_ioctl = lttng_event_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = lttng_event_ioctl,
#endif
};
int __init ltt_debugfs_abi_init(void)
{
int ret = 0;
wrapper_vmalloc_sync_all();
lttng_dentry = debugfs_create_file("lttng", S_IWUSR, NULL, NULL,
&lttng_fops);
if (IS_ERR(lttng_dentry))
lttng_dentry = NULL;
lttng_proc_dentry = proc_create_data("lttng", S_IWUSR, NULL,
&lttng_fops, NULL);
if (!lttng_dentry && !lttng_proc_dentry) {
printk(KERN_ERR "Error creating LTTng control file\n");
ret = -ENOMEM;
goto error;
}
error:
return ret;
}
void __exit ltt_debugfs_abi_exit(void)
{
if (lttng_dentry)
debugfs_remove(lttng_dentry);
if (lttng_proc_dentry)
remove_proc_entry("lttng", NULL);
}

View file

@@ -1,153 +0,0 @@
#ifndef _LTT_DEBUGFS_ABI_H
#define _LTT_DEBUGFS_ABI_H
/*
* ltt-debugfs-abi.h
*
* Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng debugfs ABI header
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/fs.h>
#define LTTNG_SYM_NAME_LEN 256
enum lttng_kernel_instrumentation {
LTTNG_KERNEL_TRACEPOINT = 0,
LTTNG_KERNEL_KPROBE = 1,
LTTNG_KERNEL_FUNCTION = 2,
LTTNG_KERNEL_KRETPROBE = 3,
LTTNG_KERNEL_NOOP = 4, /* not hooked */
LTTNG_KERNEL_SYSCALL = 5,
};
/*
* LTTng consumer mode
*/
enum lttng_kernel_output {
LTTNG_KERNEL_SPLICE = 0,
LTTNG_KERNEL_MMAP = 1,
};
/*
* LTTng DebugFS ABI structures.
*/
struct lttng_kernel_channel {
int overwrite; /* 1: overwrite, 0: discard */
uint64_t subbuf_size; /* in bytes */
uint64_t num_subbuf;
unsigned int switch_timer_interval; /* usecs */
unsigned int read_timer_interval; /* usecs */
enum lttng_kernel_output output; /* splice, mmap */
};
struct lttng_kernel_kretprobe {
uint64_t addr;
uint64_t offset;
char symbol_name[LTTNG_SYM_NAME_LEN];
};
/*
* Either addr is used, or symbol_name and offset.
*/
struct lttng_kernel_kprobe {
uint64_t addr;
uint64_t offset;
char symbol_name[LTTNG_SYM_NAME_LEN];
};
struct lttng_kernel_function_tracer {
char symbol_name[LTTNG_SYM_NAME_LEN];
};
/*
* For syscall tracing, name = '\0' means "enable all".
*/
struct lttng_kernel_event {
char name[LTTNG_SYM_NAME_LEN]; /* event name */
enum lttng_kernel_instrumentation instrumentation;
/* Per instrumentation type configuration */
union {
struct lttng_kernel_kretprobe kretprobe;
struct lttng_kernel_kprobe kprobe;
struct lttng_kernel_function_tracer ftrace;
} u;
};
struct lttng_kernel_tracer_version {
uint32_t version;
uint32_t patchlevel;
uint32_t sublevel;
};
enum lttng_kernel_calibrate_type {
LTTNG_KERNEL_CALIBRATE_KRETPROBE,
};
struct lttng_kernel_calibrate {
enum lttng_kernel_calibrate_type type; /* type (input) */
};
enum lttng_kernel_context_type {
LTTNG_KERNEL_CONTEXT_PID = 0,
LTTNG_KERNEL_CONTEXT_PERF_COUNTER = 1,
LTTNG_KERNEL_CONTEXT_PROCNAME = 2,
LTTNG_KERNEL_CONTEXT_PRIO = 3,
LTTNG_KERNEL_CONTEXT_NICE = 4,
LTTNG_KERNEL_CONTEXT_VPID = 5,
LTTNG_KERNEL_CONTEXT_TID = 6,
LTTNG_KERNEL_CONTEXT_VTID = 7,
LTTNG_KERNEL_CONTEXT_PPID = 8,
LTTNG_KERNEL_CONTEXT_VPPID = 9,
};
struct lttng_kernel_perf_counter_ctx {
uint32_t type;
uint64_t config;
char name[LTTNG_SYM_NAME_LEN];
};
struct lttng_kernel_context {
enum lttng_kernel_context_type ctx;
union {
struct lttng_kernel_perf_counter_ctx perf_counter;
} u;
};
/* LTTng file descriptor ioctl */
#define LTTNG_KERNEL_SESSION _IO(0xF6, 0x40)
#define LTTNG_KERNEL_TRACER_VERSION \
_IOR(0xF6, 0x41, struct lttng_kernel_tracer_version)
#define LTTNG_KERNEL_TRACEPOINT_LIST _IO(0xF6, 0x42)
#define LTTNG_KERNEL_WAIT_QUIESCENT _IO(0xF6, 0x43)
#define LTTNG_KERNEL_CALIBRATE \
_IOWR(0xF6, 0x44, struct lttng_kernel_calibrate)
/* Session FD ioctl */
#define LTTNG_KERNEL_METADATA \
_IOW(0xF6, 0x50, struct lttng_kernel_channel)
#define LTTNG_KERNEL_CHANNEL \
_IOW(0xF6, 0x51, struct lttng_kernel_channel)
#define LTTNG_KERNEL_SESSION_START _IO(0xF6, 0x52)
#define LTTNG_KERNEL_SESSION_STOP _IO(0xF6, 0x53)
/* Channel FD ioctl */
#define LTTNG_KERNEL_STREAM _IO(0xF6, 0x60)
#define LTTNG_KERNEL_EVENT \
_IOW(0xF6, 0x61, struct lttng_kernel_event)
/* Event and Channel FD ioctl */
#define LTTNG_KERNEL_CONTEXT \
_IOW(0xF6, 0x70, struct lttng_kernel_context)
/* Event, Channel and Session ioctl */
#define LTTNG_KERNEL_ENABLE _IO(0xF6, 0x80)
#define LTTNG_KERNEL_DISABLE _IO(0xF6, 0x81)
#endif /* _LTT_DEBUGFS_ABI_H */
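
/*
 * Hypothetical user-space sketch, not part of the original header: the call
 * sequence implied by the ABI above.  A copy of this header is assumed to be
 * usable from user space; the control-file path, channel attributes and the
 * "sched_switch" tracepoint name are illustrative, and error handling is cut
 * down to the minimum.  The file descriptors are kept open on purpose:
 * closing them tears the corresponding objects down.
 */
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include "ltt-debugfs-abi.h"

int start_basic_session(void)
{
	struct lttng_kernel_channel chan_attr = {
		.overwrite = 0,
		.subbuf_size = 262144,		/* bytes */
		.num_subbuf = 4,
		.switch_timer_interval = 0,	/* usecs, 0: disabled */
		.read_timer_interval = 200,	/* usecs */
		.output = LTTNG_KERNEL_SPLICE,
	};
	struct lttng_kernel_event ev;
	int lttng_fd, session_fd, channel_fd;

	lttng_fd = open("/sys/kernel/debug/lttng", O_RDWR);
	if (lttng_fd < 0)
		return -1;
	session_fd = ioctl(lttng_fd, LTTNG_KERNEL_SESSION);
	channel_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_attr);
	memset(&ev, 0, sizeof(ev));
	strcpy(ev.name, "sched_switch");	/* a kernel tracepoint */
	ev.instrumentation = LTTNG_KERNEL_TRACEPOINT;
	ioctl(channel_fd, LTTNG_KERNEL_EVENT, &ev);
	/* One LTTNG_KERNEL_STREAM ioctl per cpu would be issued here. */
	return ioctl(session_fd, LTTNG_KERNEL_SESSION_START);
}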

View file

@@ -1,31 +0,0 @@
#ifndef _LTT_ENDIAN_H
#define _LTT_ENDIAN_H
/*
* ltt-endian.h
*
* Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#ifdef __KERNEL__
# include <asm/byteorder.h>
# ifdef __BIG_ENDIAN
# define __BYTE_ORDER __BIG_ENDIAN
# elif defined(__LITTLE_ENDIAN)
# define __BYTE_ORDER __LITTLE_ENDIAN
# else
# error "unknown endianness"
# endif
#ifndef __BIG_ENDIAN
# define __BIG_ENDIAN 4321
#endif
#ifndef __LITTLE_ENDIAN
# define __LITTLE_ENDIAN 1234
#endif
#else
# include <endian.h>
#endif
#endif /* _LTT_ENDIAN_H */

File diff suppressed because it is too large

View file

@@ -1,452 +0,0 @@
#ifndef _LTT_EVENTS_H
#define _LTT_EVENTS_H
/*
* ltt-events.h
*
* Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Holds LTTng per-session event registry.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/list.h>
#include <linux/kprobes.h>
#include "wrapper/uuid.h"
#include "ltt-debugfs-abi.h"
#undef is_signed_type
#define is_signed_type(type) (((type)(-1)) < 0)
struct ltt_channel;
struct ltt_session;
struct lib_ring_buffer_ctx;
struct perf_event;
struct perf_event_attr;
/* Type description */
/* Update the abstract_types name table in lttng-types.c along with this enum */
enum abstract_types {
atype_integer,
atype_enum,
atype_array,
atype_sequence,
atype_string,
NR_ABSTRACT_TYPES,
};
/* Update the string_encodings name table in lttng-types.c along with this enum */
enum lttng_string_encodings {
lttng_encode_none = 0,
lttng_encode_UTF8 = 1,
lttng_encode_ASCII = 2,
NR_STRING_ENCODINGS,
};
struct lttng_enum_entry {
unsigned long long start, end; /* start and end are inclusive */
const char *string;
};
#define __type_integer(_type, _byte_order, _base, _encoding) \
{ \
.atype = atype_integer, \
.u.basic.integer = \
{ \
.size = sizeof(_type) * CHAR_BIT, \
.alignment = ltt_alignof(_type) * CHAR_BIT, \
.signedness = is_signed_type(_type), \
.reverse_byte_order = _byte_order != __BYTE_ORDER, \
.base = _base, \
.encoding = lttng_encode_##_encoding, \
}, \
}
struct lttng_integer_type {
unsigned int size; /* in bits */
unsigned short alignment; /* in bits */
uint signedness:1;
uint reverse_byte_order:1;
unsigned int base; /* 2, 8, 10, 16, for pretty print */
enum lttng_string_encodings encoding;
};
union _lttng_basic_type {
struct lttng_integer_type integer;
struct {
const char *name;
} enumeration;
struct {
enum lttng_string_encodings encoding;
} string;
};
struct lttng_basic_type {
enum abstract_types atype;
union {
union _lttng_basic_type basic;
} u;
};
struct lttng_type {
enum abstract_types atype;
union {
union _lttng_basic_type basic;
struct {
struct lttng_basic_type elem_type;
unsigned int length; /* num. elems. */
} array;
struct {
struct lttng_basic_type length_type;
struct lttng_basic_type elem_type;
} sequence;
} u;
};
struct lttng_enum {
const char *name;
struct lttng_type container_type;
const struct lttng_enum_entry *entries;
unsigned int len;
};
/* Event field description */
struct lttng_event_field {
const char *name;
struct lttng_type type;
};
/*
* We need to keep this perf counter field separate from struct
* lttng_ctx_field because cpu hotplug needs fixed-location addresses.
*/
struct lttng_perf_counter_field {
struct notifier_block nb;
int hp_enable;
struct perf_event_attr *attr;
struct perf_event **e; /* per-cpu array */
};
struct lttng_ctx_field {
struct lttng_event_field event_field;
size_t (*get_size)(size_t offset);
void (*record)(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan);
union {
struct lttng_perf_counter_field *perf_counter;
} u;
void (*destroy)(struct lttng_ctx_field *field);
};
struct lttng_ctx {
struct lttng_ctx_field *fields;
unsigned int nr_fields;
unsigned int allocated_fields;
};
struct lttng_event_desc {
const char *name;
void *probe_callback;
const struct lttng_event_ctx *ctx; /* context */
const struct lttng_event_field *fields; /* event payload */
unsigned int nr_fields;
struct module *owner;
};
struct lttng_probe_desc {
const struct lttng_event_desc **event_desc;
unsigned int nr_events;
struct list_head head; /* chain registered probes */
};
struct lttng_krp; /* Kretprobe handling */
/*
* ltt_event structure is referred to by the tracing fast path. It must be
* kept small.
*/
struct ltt_event {
unsigned int id;
struct ltt_channel *chan;
int enabled;
const struct lttng_event_desc *desc;
void *filter;
struct lttng_ctx *ctx;
enum lttng_kernel_instrumentation instrumentation;
union {
struct {
struct kprobe kp;
char *symbol_name;
} kprobe;
struct {
struct lttng_krp *lttng_krp;
char *symbol_name;
} kretprobe;
struct {
char *symbol_name;
} ftrace;
} u;
struct list_head list; /* Event list */
uint metadata_dumped:1;
};
struct ltt_channel_ops {
struct channel *(*channel_create)(const char *name,
struct ltt_channel *ltt_chan,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval);
void (*channel_destroy)(struct channel *chan);
struct lib_ring_buffer *(*buffer_read_open)(struct channel *chan);
int (*buffer_has_read_closed_stream)(struct channel *chan);
void (*buffer_read_close)(struct lib_ring_buffer *buf);
int (*event_reserve)(struct lib_ring_buffer_ctx *ctx,
uint32_t event_id);
void (*event_commit)(struct lib_ring_buffer_ctx *ctx);
void (*event_write)(struct lib_ring_buffer_ctx *ctx, const void *src,
size_t len);
void (*event_write_from_user)(struct lib_ring_buffer_ctx *ctx,
const void *src, size_t len);
void (*event_memset)(struct lib_ring_buffer_ctx *ctx,
int c, size_t len);
/*
* packet_avail_size returns the available size in the current
* packet. Note that the size returned is only a hint, since it
* may change due to concurrent writes.
*/
size_t (*packet_avail_size)(struct channel *chan);
wait_queue_head_t *(*get_writer_buf_wait_queue)(struct channel *chan, int cpu);
wait_queue_head_t *(*get_hp_wait_queue)(struct channel *chan);
int (*is_finalized)(struct channel *chan);
int (*is_disabled)(struct channel *chan);
};
struct ltt_transport {
char *name;
struct module *owner;
struct list_head node;
struct ltt_channel_ops ops;
};
struct ltt_channel {
unsigned int id;
struct channel *chan; /* Channel buffers */
int enabled;
struct lttng_ctx *ctx;
/* Event ID management */
struct ltt_session *session;
struct file *file; /* File associated to channel */
unsigned int free_event_id; /* Next event ID to allocate */
struct list_head list; /* Channel list */
struct ltt_channel_ops *ops;
struct ltt_transport *transport;
struct ltt_event **sc_table; /* for syscall tracing */
struct ltt_event **compat_sc_table;
struct ltt_event *sc_unknown; /* for unknown syscalls */
struct ltt_event *sc_compat_unknown;
struct ltt_event *sc_exit; /* for syscall exit */
int header_type; /* 0: unset, 1: compact, 2: large */
uint metadata_dumped:1;
};
struct ltt_session {
int active; /* Is trace session active ? */
int been_active; /* Has trace session been active ? */
struct file *file; /* File associated to session */
struct ltt_channel *metadata; /* Metadata channel */
struct list_head chan; /* Channel list head */
struct list_head events; /* Event list head */
struct list_head list; /* Session list */
unsigned int free_chan_id; /* Next chan ID to allocate */
uuid_le uuid; /* Trace session unique ID */
uint metadata_dumped:1;
};
struct ltt_session *ltt_session_create(void);
int ltt_session_enable(struct ltt_session *session);
int ltt_session_disable(struct ltt_session *session);
void ltt_session_destroy(struct ltt_session *session);
struct ltt_channel *ltt_channel_create(struct ltt_session *session,
const char *transport_name,
void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval);
struct ltt_channel *ltt_global_channel_create(struct ltt_session *session,
int overwrite, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval);
struct ltt_event *ltt_event_create(struct ltt_channel *chan,
struct lttng_kernel_event *event_param,
void *filter,
const struct lttng_event_desc *internal_desc);
int ltt_channel_enable(struct ltt_channel *channel);
int ltt_channel_disable(struct ltt_channel *channel);
int ltt_event_enable(struct ltt_event *event);
int ltt_event_disable(struct ltt_event *event);
void ltt_transport_register(struct ltt_transport *transport);
void ltt_transport_unregister(struct ltt_transport *transport);
void synchronize_trace(void);
int ltt_debugfs_abi_init(void);
void ltt_debugfs_abi_exit(void);
int ltt_probe_register(struct lttng_probe_desc *desc);
void ltt_probe_unregister(struct lttng_probe_desc *desc);
const struct lttng_event_desc *ltt_event_get(const char *name);
void ltt_event_put(const struct lttng_event_desc *desc);
int ltt_probes_init(void);
void ltt_probes_exit(void);
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
int lttng_syscalls_register(struct ltt_channel *chan, void *filter);
int lttng_syscalls_unregister(struct ltt_channel *chan);
#else
static inline int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
{
return -ENOSYS;
}
static inline int lttng_syscalls_unregister(struct ltt_channel *chan)
{
return 0;
}
#endif
struct lttng_ctx_field *lttng_append_context(struct lttng_ctx **ctx);
int lttng_find_context(struct lttng_ctx *ctx, const char *name);
void lttng_remove_context_field(struct lttng_ctx **ctx,
struct lttng_ctx_field *field);
void lttng_destroy_context(struct lttng_ctx *ctx);
int lttng_add_pid_to_ctx(struct lttng_ctx **ctx);
int lttng_add_procname_to_ctx(struct lttng_ctx **ctx);
int lttng_add_prio_to_ctx(struct lttng_ctx **ctx);
int lttng_add_nice_to_ctx(struct lttng_ctx **ctx);
int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx);
int lttng_add_tid_to_ctx(struct lttng_ctx **ctx);
int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx);
int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx);
int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx);
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
const char *name,
struct lttng_ctx **ctx);
#else
static inline
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
const char *name,
struct lttng_ctx **ctx)
{
return -ENOSYS;
}
#endif
#ifdef CONFIG_KPROBES
int lttng_kprobes_register(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct ltt_event *event);
void lttng_kprobes_unregister(struct ltt_event *event);
void lttng_kprobes_destroy_private(struct ltt_event *event);
#else
static inline
int lttng_kprobes_register(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct ltt_event *event)
{
return -ENOSYS;
}
static inline
void lttng_kprobes_unregister(struct ltt_event *event)
{
}
static inline
void lttng_kprobes_destroy_private(struct ltt_event *event)
{
}
#endif
#ifdef CONFIG_KRETPROBES
int lttng_kretprobes_register(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct ltt_event *event_entry,
struct ltt_event *event_exit);
void lttng_kretprobes_unregister(struct ltt_event *event);
void lttng_kretprobes_destroy_private(struct ltt_event *event);
#else
static inline
int lttng_kretprobes_register(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct ltt_event *event_entry,
struct ltt_event *event_exit)
{
return -ENOSYS;
}
static inline
void lttng_kretprobes_unregister(struct ltt_event *event)
{
}
static inline
void lttng_kretprobes_destroy_private(struct ltt_event *event)
{
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
int lttng_ftrace_register(const char *name,
const char *symbol_name,
struct ltt_event *event);
void lttng_ftrace_unregister(struct ltt_event *event);
void lttng_ftrace_destroy_private(struct ltt_event *event);
#else
static inline
int lttng_ftrace_register(const char *name,
const char *symbol_name,
struct ltt_event *event)
{
return -ENOSYS;
}
static inline
void lttng_ftrace_unregister(struct ltt_event *event)
{
}
static inline
void lttng_ftrace_destroy_private(struct ltt_event *event)
{
}
#endif
int lttng_calibrate(struct lttng_kernel_calibrate *calibrate);
extern const struct file_operations lttng_tracepoint_list_fops;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
#define TRACEPOINT_HAS_DATA_ARG
#endif
#endif /* _LTT_EVENTS_H */
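To make the registry API above concrete, here is a minimal probe-provider sketch (not part of the original tree) declaring one event with a single integer payload field and registering it. The event name, field name and the NULL probe callback are placeholders.

	#include <linux/module.h>
	#include "ltt-tracer.h"	/* pulls in ltt-events.h, ltt_alignof(), CHAR_BIT */

	static const struct lttng_event_field demo_fields[] = {
		{
			.name = "value",
			.type = {
				.atype = atype_integer,
				.u.basic.integer = {
					.size = sizeof(int) * CHAR_BIT,
					.alignment = ltt_alignof(int) * CHAR_BIT,
					.signedness = is_signed_type(int),
					.reverse_byte_order = 0,
					.base = 10,
					.encoding = lttng_encode_none,
				},
			},
		},
	};

	static const struct lttng_event_desc demo_event = {
		.name = "demo_event",
		.probe_callback = NULL,	/* a real provider points this at its probe */
		.fields = demo_fields,
		.nr_fields = ARRAY_SIZE(demo_fields),
		.owner = THIS_MODULE,
	};

	static const struct lttng_event_desc *demo_event_desc[] = { &demo_event };

	static struct lttng_probe_desc demo_probe_desc = {
		.event_desc = demo_event_desc,
		.nr_events = ARRAY_SIZE(demo_event_desc),
	};

	static int __init demo_probe_init(void)
	{
		return ltt_probe_register(&demo_probe_desc); /* -EEXIST on name clash */
	}
	module_init(demo_probe_init);

	static void __exit demo_probe_exit(void)
	{
		ltt_probe_unregister(&demo_probe_desc);
	}
	module_exit(demo_probe_exit);

	MODULE_LICENSE("GPL");

The -EEXIST return matches the duplicate-name check performed by ltt_probe_register() in ltt-probes.c below.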

View file

@ -1,164 +0,0 @@
/*
* ltt-probes.c
*
* Copyright 2010 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Holds LTTng probes registry.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include "ltt-events.h"
static LIST_HEAD(probe_list);
static DEFINE_MUTEX(probe_mutex);
static
const struct lttng_event_desc *find_event(const char *name)
{
struct lttng_probe_desc *probe_desc;
int i;
list_for_each_entry(probe_desc, &probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (!strcmp(probe_desc->event_desc[i]->name, name))
return probe_desc->event_desc[i];
}
}
return NULL;
}
int ltt_probe_register(struct lttng_probe_desc *desc)
{
int ret = 0;
int i;
mutex_lock(&probe_mutex);
/*
* TODO: This is O(N^2). Turn into a hash table when probe registration
* overhead becomes an issue.
*/
for (i = 0; i < desc->nr_events; i++) {
if (find_event(desc->event_desc[i]->name)) {
ret = -EEXIST;
goto end;
}
}
list_add(&desc->head, &probe_list);
end:
mutex_unlock(&probe_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(ltt_probe_register);
void ltt_probe_unregister(struct lttng_probe_desc *desc)
{
mutex_lock(&probe_mutex);
list_del(&desc->head);
mutex_unlock(&probe_mutex);
}
EXPORT_SYMBOL_GPL(ltt_probe_unregister);
const struct lttng_event_desc *ltt_event_get(const char *name)
{
const struct lttng_event_desc *event;
int ret;
mutex_lock(&probe_mutex);
event = find_event(name);
mutex_unlock(&probe_mutex);
if (!event)
return NULL;
ret = try_module_get(event->owner);
WARN_ON_ONCE(!ret);
return event;
}
EXPORT_SYMBOL_GPL(ltt_event_get);
void ltt_event_put(const struct lttng_event_desc *event)
{
module_put(event->owner);
}
EXPORT_SYMBOL_GPL(ltt_event_put);
static
void *tp_list_start(struct seq_file *m, loff_t *pos)
{
struct lttng_probe_desc *probe_desc;
int iter = 0, i;
mutex_lock(&probe_mutex);
list_for_each_entry(probe_desc, &probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (iter++ >= *pos)
return (void *) probe_desc->event_desc[i];
}
}
/* End of list */
return NULL;
}
static
void *tp_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
struct lttng_probe_desc *probe_desc;
int iter = 0, i;
(*ppos)++;
list_for_each_entry(probe_desc, &probe_list, head) {
for (i = 0; i < probe_desc->nr_events; i++) {
if (iter++ >= *ppos)
return (void *) probe_desc->event_desc[i];
}
}
/* End of list */
return NULL;
}
static
void tp_list_stop(struct seq_file *m, void *p)
{
mutex_unlock(&probe_mutex);
}
static
int tp_list_show(struct seq_file *m, void *p)
{
const struct lttng_event_desc *probe_desc = p;
/*
* Don't export lttng internal events (metadata).
*/
if (!strncmp(probe_desc->name, "lttng_", sizeof("lttng_") - 1))
return 0;
seq_printf(m, "event { name = %s; };\n",
probe_desc->name);
return 0;
}
static
const struct seq_operations lttng_tracepoint_list_seq_ops = {
.start = tp_list_start,
.next = tp_list_next,
.stop = tp_list_stop,
.show = tp_list_show,
};
static
int lttng_tracepoint_list_open(struct inode *inode, struct file *file)
{
return seq_open(file, &lttng_tracepoint_list_seq_ops);
}
const struct file_operations lttng_tracepoint_list_fops = {
.owner = THIS_MODULE,
.open = lttng_tracepoint_list_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
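The seq_file operations above only implement the listing; lttng_tracepoint_list_fops is hooked into the filesystem elsewhere (in the debugfs ABI code, whose diff is suppressed above). As a rough sketch of how such a seq_file interface is typically exposed, with the file name, mode and parent directory chosen purely for illustration:

	#include <linux/debugfs.h>

	static struct dentry *tp_list_dentry;

	static int __init demo_expose_tracepoint_list(void)
	{
		/* Simplified: real code would also handle ERR_PTR returns. */
		tp_list_dentry = debugfs_create_file("lttng-tracepoint-list", S_IRUGO,
						     NULL, NULL,
						     &lttng_tracepoint_list_fops);
		return tp_list_dentry ? 0 : -ENOMEM;
	}

Reading the resulting file yields one "event { name = ...; };" line per registered, non-internal event, as produced by tp_list_show() above.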

View file

@ -1,21 +0,0 @@
/*
* ltt-ring-buffer-client-discard.c
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer client (discard mode).
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include "ltt-tracer.h"
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
#include "ltt-ring-buffer-client.h"
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");

View file

@ -1,21 +0,0 @@
/*
* ltt-ring-buffer-client-discard.c
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer client (discard mode).
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include "ltt-tracer.h"
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-mmap"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
#include "ltt-ring-buffer-client.h"
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client Discard Mode");

View file

@ -1,21 +0,0 @@
/*
* ltt-ring-buffer-client-overwrite.c
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer client (overwrite mode).
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include "ltt-tracer.h"
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-mmap"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
#include "ltt-ring-buffer-client.h"
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");

View file

@ -1,21 +0,0 @@
/*
* ltt-ring-buffer-client-overwrite.c
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer client (overwrite mode).
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include "ltt-tracer.h"
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
#include "ltt-ring-buffer-client.h"
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Client Overwrite Mode");

View file

@ -1,569 +0,0 @@
/*
* ltt-ring-buffer-client.h
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer client template.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/types.h>
#include "lib/bitfield.h"
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "wrapper/trace-clock.h"
#include "ltt-events.h"
#include "ltt-tracer.h"
#include "wrapper/ringbuffer/frontend_types.h"
/*
* Keep the natural field alignment for _each field_ within this structure if
* you ever add/remove a field from this header. Packed attribute is not used
* because gcc generates poor code on at least powerpc and mips. Don't ever
* let gcc add padding between the structure elements.
*
* The guarantee we have with timestamps is that all the events in a
* packet are included (inclusive) within the begin/end timestamps of
* the packet. Another guarantee we have is that the "timestamp begin",
* as well as the event timestamps, are monotonically increasing (never
* decrease) when moving forward in a stream (physically). But this
* guarantee does not apply to "timestamp end", because it is sampled at
* commit time, which is not ordered with respect to space reservation.
*/
struct packet_header {
/* Trace packet header */
uint32_t magic; /*
* Trace magic number.
* Contains endianness information.
*/
uint8_t uuid[16];
uint32_t stream_id;
struct {
/* Stream packet context */
uint64_t timestamp_begin; /* Cycle count at subbuffer start */
uint64_t timestamp_end; /* Cycle count at subbuffer end */
uint32_t events_discarded; /*
* Events lost in this subbuffer since
* the beginning of the trace.
* (may overflow)
*/
uint32_t content_size; /* Size of data in subbuffer */
uint32_t packet_size; /* Subbuffer size (includes padding) */
uint32_t cpu_id; /* CPU id associated with stream */
uint8_t header_end; /* End of header */
} ctx;
};
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
return trace_clock_read64();
}
static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
int i;
size_t orig_offset = offset;
if (likely(!ctx))
return 0;
for (i = 0; i < ctx->nr_fields; i++)
offset += ctx->fields[i].get_size(offset);
return offset - orig_offset;
}
static inline
void ctx_record(struct lib_ring_buffer_ctx *bufctx,
struct ltt_channel *chan,
struct lttng_ctx *ctx)
{
int i;
if (likely(!ctx))
return;
for (i = 0; i < ctx->nr_fields; i++)
ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}
/*
* record_header_size - Calculate the header size and padding necessary.
* @config: ring buffer instance configuration
* @chan: channel
* @offset: offset in the write buffer
* @pre_header_padding: padding to add before the header (output)
* @ctx: reservation context
*
* Returns the event header size (including padding).
*
* The payload must itself determine its own alignment from the biggest type it
* contains.
*/
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
struct ltt_channel *ltt_chan = channel_get_private(chan);
struct ltt_event *event = ctx->priv;
size_t orig_offset = offset;
size_t padding;
switch (ltt_chan->header_type) {
case 1: /* compact */
padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
offset += padding;
if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
offset += sizeof(uint32_t); /* id and timestamp */
} else {
/* Minimum space taken by 5-bit id */
offset += sizeof(uint8_t);
/* Align extended struct on largest member */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint32_t); /* id */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
}
break;
case 2: /* large */
padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
offset += padding;
offset += sizeof(uint16_t);
if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
offset += sizeof(uint32_t); /* timestamp */
} else {
/* Align extended struct on largest member */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint32_t); /* id */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
}
break;
default:
padding = 0;
WARN_ON_ONCE(1);
}
offset += ctx_get_size(offset, event->ctx);
offset += ctx_get_size(offset, ltt_chan->ctx);
*pre_header_padding = padding;
return offset - orig_offset;
}
#include "wrapper/ringbuffer/api.h"
static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
uint32_t event_id);
/*
* ltt_write_event_header
*
* Writes the event header to the offset (already aligned on 32-bits).
*
* @config: ring buffer instance configuration
* @ctx: reservation context
* @event_id: event ID
*/
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
struct ltt_event *event = ctx->priv;
if (unlikely(ctx->rflags))
goto slow_path;
switch (ltt_chan->header_type) {
case 1: /* compact */
{
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
break;
}
case 2: /* large */
{
uint32_t timestamp = (uint32_t) ctx->tsc;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
break;
}
default:
WARN_ON_ONCE(1);
}
ctx_record(ctx, ltt_chan, ltt_chan->ctx);
ctx_record(ctx, ltt_chan, event->ctx);
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
slow_path:
ltt_write_event_header_slow(config, ctx, event_id);
}
static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
struct ltt_event *event = ctx->priv;
switch (ltt_chan->header_type) {
case 1: /* compact */
if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t id_time = 0;
bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
} else {
uint8_t id = 0;
uint64_t timestamp = ctx->tsc;
bt_bitfield_write(&id, uint8_t, 0, 5, 31);
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
}
break;
case 2: /* large */
{
if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
uint32_t timestamp = (uint32_t) ctx->tsc;
uint16_t id = event_id;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
} else {
uint16_t id = 65535;
uint64_t timestamp = ctx->tsc;
lib_ring_buffer_write(config, ctx, &id, sizeof(id));
/* Align extended struct on largest member */
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
}
break;
}
default:
WARN_ON_ONCE(1);
}
ctx_record(ctx, ltt_chan, ltt_chan->ctx);
ctx_record(ctx, ltt_chan, event->ctx);
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
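To see what the compact case amounts to: the two bt_bitfield_write() calls above pack a 5-bit event id and the 27 low-order timestamp bits into one 32-bit word. On a little-endian host (where bt_bitfield_write() fills from the least significant bit) that is equivalent to the following stand-alone sketch, which is illustrative only and not part of the original file:

	static inline uint32_t compact_id_time(uint32_t event_id, uint64_t tsc)
	{
		/* ids 0..30 take the fast path; id 31 announces the extended header */
		return (event_id & 0x1f) | ((uint32_t)(tsc & 0x07ffffff) << 5);
	}
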
static const struct lib_ring_buffer_config client_config;
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
return lib_ring_buffer_clock_read(chan);
}
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
return record_header_size(config, chan, offset,
pre_header_padding, ctx);
}
/**
* client_packet_header_size - called on buffer-switch to a new sub-buffer
*
* Return header size without padding after the structure. Don't use packed
* structure because gcc generates inefficient code on some architectures
* (powerpc, mips..)
*/
static size_t client_packet_header_size(void)
{
return offsetof(struct packet_header, ctx.header_end);
}
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
struct channel *chan = buf->backend.chan;
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
subbuf_idx * chan->backend.subbuf_size);
struct ltt_channel *ltt_chan = channel_get_private(chan);
struct ltt_session *session = ltt_chan->session;
header->magic = CTF_MAGIC_NUMBER;
memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
header->stream_id = ltt_chan->id;
header->ctx.timestamp_begin = tsc;
header->ctx.timestamp_end = 0;
header->ctx.events_discarded = 0;
header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
header->ctx.packet_size = 0xFFFFFFFF;
header->ctx.cpu_id = buf->backend.cpu;
}
/*
* offset is assumed never to be 0 here: never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
struct channel *chan = buf->backend.chan;
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
subbuf_idx * chan->backend.subbuf_size);
unsigned long records_lost = 0;
header->ctx.timestamp_end = tsc;
header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
header->ctx.events_discarded = records_lost;
}
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
int cpu, const char *name)
{
return 0;
}
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
static const struct lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
.cb.buffer_begin = client_buffer_begin,
.cb.buffer_end = client_buffer_end,
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
.tsc_bits = 32,
.alloc = RING_BUFFER_ALLOC_PER_CPU,
.sync = RING_BUFFER_SYNC_PER_CPU,
.mode = RING_BUFFER_MODE_TEMPLATE,
.backend = RING_BUFFER_PAGE,
.output = RING_BUFFER_OUTPUT_TEMPLATE,
.oops = RING_BUFFER_OOPS_CONSISTENCY,
.ipi = RING_BUFFER_IPI_BARRIER,
.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
static
struct channel *_channel_create(const char *name,
struct ltt_channel *ltt_chan, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
return channel_create(&client_config, name, ltt_chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
read_timer_interval);
}
static
void ltt_channel_destroy(struct channel *chan)
{
channel_destroy(chan);
}
static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(&client_config, chan, cpu);
if (!lib_ring_buffer_open_read(buf))
return buf;
}
return NULL;
}
static
int ltt_buffer_has_read_closed_stream(struct channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(&client_config, chan, cpu);
if (!atomic_long_read(&buf->active_readers))
return 1;
}
return 0;
}
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
int ret, cpu;
cpu = lib_ring_buffer_get_cpu(&client_config);
if (cpu < 0)
return -EPERM;
ctx->cpu = cpu;
switch (ltt_chan->header_type) {
case 1: /* compact */
if (event_id > 30)
ctx->rflags |= LTT_RFLAG_EXTENDED;
break;
case 2: /* large */
if (event_id > 65534)
ctx->rflags |= LTT_RFLAG_EXTENDED;
break;
default:
WARN_ON_ONCE(1);
}
ret = lib_ring_buffer_reserve(&client_config, ctx);
if (ret)
goto put;
ltt_write_event_header(&client_config, ctx, event_id);
return 0;
put:
lib_ring_buffer_put_cpu(&client_config);
return ret;
}
static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
lib_ring_buffer_put_cpu(&client_config);
}
static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
size_t len)
{
lib_ring_buffer_write(&client_config, ctx, src, len);
}
static
void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
{
lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
}
static
void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
int c, size_t len)
{
lib_ring_buffer_memset(&client_config, ctx, c, len);
}
static
wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
return &buf->write_wait;
}
static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
return &chan->hp_wait;
}
static
int ltt_is_finalized(struct channel *chan)
{
return lib_ring_buffer_channel_is_finalized(chan);
}
static
int ltt_is_disabled(struct channel *chan)
{
return lib_ring_buffer_channel_is_disabled(chan);
}
static struct ltt_transport ltt_relay_transport = {
.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
.owner = THIS_MODULE,
.ops = {
.channel_create = _channel_create,
.channel_destroy = ltt_channel_destroy,
.buffer_read_open = ltt_buffer_read_open,
.buffer_has_read_closed_stream =
ltt_buffer_has_read_closed_stream,
.buffer_read_close = ltt_buffer_read_close,
.event_reserve = ltt_event_reserve,
.event_commit = ltt_event_commit,
.event_write = ltt_event_write,
.event_write_from_user = ltt_event_write_from_user,
.event_memset = ltt_event_memset,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
.get_hp_wait_queue = ltt_get_hp_wait_queue,
.is_finalized = ltt_is_finalized,
.is_disabled = ltt_is_disabled,
},
};
static int __init ltt_ring_buffer_client_init(void)
{
/*
* This vmalloc sync all also takes care of the lib ring buffer
* vmalloc'd module pages when it is built as a module into LTTng.
*/
wrapper_vmalloc_sync_all();
ltt_transport_register(&ltt_relay_transport);
return 0;
}
module_init(ltt_ring_buffer_client_init);
static void __exit ltt_ring_buffer_client_exit(void)
{
ltt_transport_unregister(&ltt_relay_transport);
}
module_exit(ltt_ring_buffer_client_exit);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
" client");

View file

@ -1,21 +0,0 @@
/*
* ltt-ring-buffer-metadata-client.c
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer metadata client.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include "ltt-tracer.h"
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_SPLICE
#include "ltt-ring-buffer-metadata-client.h"
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");

View file

@ -1,330 +0,0 @@
/*
* ltt-ring-buffer-client.h
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer client template.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/types.h>
#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "ltt-events.h"
#include "ltt-tracer.h"
struct metadata_packet_header {
uint32_t magic; /* 0x75D11D57 */
uint8_t uuid[16]; /* Unique Universal Identifier */
uint32_t checksum; /* 0 if unused */
uint32_t content_size; /* in bits */
uint32_t packet_size; /* in bits */
uint8_t compression_scheme; /* 0 if unused */
uint8_t encryption_scheme; /* 0 if unused */
uint8_t checksum_scheme; /* 0 if unused */
uint8_t major; /* CTF spec major version number */
uint8_t minor; /* CTF spec minor version number */
uint8_t header_end[0];
};
struct metadata_record_header {
uint8_t header_end[0]; /* End of header */
};
static const struct lib_ring_buffer_config client_config;
static inline
u64 lib_ring_buffer_clock_read(struct channel *chan)
{
return 0;
}
static inline
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
return 0;
}
#include "wrapper/ringbuffer/api.h"
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
return 0;
}
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
return 0;
}
/**
* client_packet_header_size - called on buffer-switch to a new sub-buffer
*
* Return header size without padding after the structure. Don't use packed
* structure because gcc generates inefficient code on some architectures
* (powerpc, mips..)
*/
static size_t client_packet_header_size(void)
{
return offsetof(struct metadata_packet_header, header_end);
}
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx)
{
struct channel *chan = buf->backend.chan;
struct metadata_packet_header *header =
(struct metadata_packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
subbuf_idx * chan->backend.subbuf_size);
struct ltt_channel *ltt_chan = channel_get_private(chan);
struct ltt_session *session = ltt_chan->session;
header->magic = TSDL_MAGIC_NUMBER;
memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
header->checksum = 0; /* 0 if unused */
header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
header->compression_scheme = 0; /* 0 if unused */
header->encryption_scheme = 0; /* 0 if unused */
header->checksum_scheme = 0; /* 0 if unused */
header->major = CTF_SPEC_MAJOR;
header->minor = CTF_SPEC_MINOR;
}
/*
* offset is assumed never to be 0 here: never deliver a completely empty
* subbuffer. data_size is between 1 and subbuf_size.
*/
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
unsigned int subbuf_idx, unsigned long data_size)
{
struct channel *chan = buf->backend.chan;
struct metadata_packet_header *header =
(struct metadata_packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
subbuf_idx * chan->backend.subbuf_size);
unsigned long records_lost = 0;
header->content_size = data_size * CHAR_BIT; /* in bits */
header->packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
/*
* We do not care about the records lost count, because the metadata
* channel waits and retries.
*/
(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
WARN_ON_ONCE(records_lost != 0);
}
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
int cpu, const char *name)
{
return 0;
}
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
static const struct lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.cb.subbuffer_header_size = client_packet_header_size,
.cb.buffer_begin = client_buffer_begin,
.cb.buffer_end = client_buffer_end,
.cb.buffer_create = client_buffer_create,
.cb.buffer_finalize = client_buffer_finalize,
.tsc_bits = 0,
.alloc = RING_BUFFER_ALLOC_GLOBAL,
.sync = RING_BUFFER_SYNC_GLOBAL,
.mode = RING_BUFFER_MODE_TEMPLATE,
.backend = RING_BUFFER_PAGE,
.output = RING_BUFFER_OUTPUT_TEMPLATE,
.oops = RING_BUFFER_OOPS_CONSISTENCY,
.ipi = RING_BUFFER_IPI_BARRIER,
.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
static
struct channel *_channel_create(const char *name,
struct ltt_channel *ltt_chan, void *buf_addr,
size_t subbuf_size, size_t num_subbuf,
unsigned int switch_timer_interval,
unsigned int read_timer_interval)
{
return channel_create(&client_config, name, ltt_chan, buf_addr,
subbuf_size, num_subbuf, switch_timer_interval,
read_timer_interval);
}
static
void ltt_channel_destroy(struct channel *chan)
{
channel_destroy(chan);
}
static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
{
struct lib_ring_buffer *buf;
buf = channel_get_ring_buffer(&client_config, chan, 0);
if (!lib_ring_buffer_open_read(buf))
return buf;
return NULL;
}
static
int ltt_buffer_has_read_closed_stream(struct channel *chan)
{
struct lib_ring_buffer *buf;
int cpu;
for_each_channel_cpu(cpu, chan) {
buf = channel_get_ring_buffer(&client_config, chan, cpu);
if (!atomic_long_read(&buf->active_readers))
return 1;
}
return 0;
}
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
}
static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
{
return lib_ring_buffer_reserve(&client_config, ctx);
}
static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
lib_ring_buffer_commit(&client_config, ctx);
}
static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
size_t len)
{
lib_ring_buffer_write(&client_config, ctx, src, len);
}
static
void ltt_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
const void __user *src, size_t len)
{
lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
}
static
void ltt_event_memset(struct lib_ring_buffer_ctx *ctx,
int c, size_t len)
{
lib_ring_buffer_memset(&client_config, ctx, c, len);
}
static
size_t ltt_packet_avail_size(struct channel *chan)
{
unsigned long o_begin;
struct lib_ring_buffer *buf;
buf = chan->backend.buf; /* Only for the global buffer! */
o_begin = v_read(&client_config, &buf->offset);
if (subbuf_offset(o_begin, chan) != 0) {
return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
} else {
return chan->backend.subbuf_size - subbuf_offset(o_begin, chan)
- sizeof(struct metadata_packet_header);
}
}
static
wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
chan, cpu);
return &buf->write_wait;
}
static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
return &chan->hp_wait;
}
static
int ltt_is_finalized(struct channel *chan)
{
return lib_ring_buffer_channel_is_finalized(chan);
}
static
int ltt_is_disabled(struct channel *chan)
{
return lib_ring_buffer_channel_is_disabled(chan);
}
static struct ltt_transport ltt_relay_transport = {
.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
.owner = THIS_MODULE,
.ops = {
.channel_create = _channel_create,
.channel_destroy = ltt_channel_destroy,
.buffer_read_open = ltt_buffer_read_open,
.buffer_has_read_closed_stream =
ltt_buffer_has_read_closed_stream,
.buffer_read_close = ltt_buffer_read_close,
.event_reserve = ltt_event_reserve,
.event_commit = ltt_event_commit,
.event_write_from_user = ltt_event_write_from_user,
.event_memset = ltt_event_memset,
.event_write = ltt_event_write,
.packet_avail_size = ltt_packet_avail_size,
.get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
.get_hp_wait_queue = ltt_get_hp_wait_queue,
.is_finalized = ltt_is_finalized,
.is_disabled = ltt_is_disabled,
},
};
static int __init ltt_ring_buffer_client_init(void)
{
/*
* This vmalloc sync all also takes care of the lib ring buffer
* vmalloc'd module pages when it is built as a module into LTTng.
*/
wrapper_vmalloc_sync_all();
ltt_transport_register(&ltt_relay_transport);
return 0;
}
module_init(ltt_ring_buffer_client_init);
static void __exit ltt_ring_buffer_client_exit(void)
{
ltt_transport_unregister(&ltt_relay_transport);
}
module_exit(ltt_ring_buffer_client_exit);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
" client");

View file

@ -1,21 +0,0 @@
/*
* ltt-ring-buffer-metadata-client.c
*
* Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng lib ring buffer metadata client.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include "ltt-tracer.h"
#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata-mmap"
#define RING_BUFFER_OUTPUT_TEMPLATE RING_BUFFER_MMAP
#include "ltt-ring-buffer-metadata-client.h"
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng Ring Buffer Metadata Client");

View file

@ -1,28 +0,0 @@
#ifndef LTT_TRACER_CORE_H
#define LTT_TRACER_CORE_H
/*
* ltt-tracer-core.h
*
* Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* This contains the core definitions for the Linux Trace Toolkit.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/list.h>
#include <linux/percpu.h>
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Align data on its natural alignment */
#define RING_BUFFER_ALIGN
#endif
#include "wrapper/ringbuffer/config.h"
struct ltt_session;
struct ltt_channel;
struct ltt_event;
#endif /* LTT_TRACER_CORE_H */

View file

@ -1,67 +0,0 @@
#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H
/*
* ltt-tracer.h
*
* Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* This contains the definitions for the Linux Trace Toolkit tracer.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <stdarg.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timex.h>
#include <linux/wait.h>
#include <asm/atomic.h>
#include <asm/local.h>
#include "wrapper/trace-clock.h"
#include "ltt-tracer-core.h"
#include "ltt-events.h"
#define LTTNG_VERSION 0
#define LTTNG_PATCHLEVEL 9
#define LTTNG_SUBLEVEL 1
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif
/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE 32L
#define LTT_MAX_SMALL_SIZE 0xFFFFU
#ifdef RING_BUFFER_ALIGN
#define ltt_alignof(type) __alignof__(type)
#else
#define ltt_alignof(type) 1
#endif
/* Tracer properties */
#define CTF_MAGIC_NUMBER 0xC1FC1FC1
#define TSDL_MAGIC_NUMBER 0x75D11D57
/* CTF specification version followed */
#define CTF_SPEC_MAJOR 1
#define CTF_SPEC_MINOR 8
/* Tracer major/minor versions */
#define CTF_VERSION_MAJOR 0
#define CTF_VERSION_MINOR 1
/*
* Number of milliseconds to retry before failing metadata writes on buffer full
* condition. (10 seconds)
*/
#define LTTNG_METADATA_TIMEOUT_MSEC 10000
#define LTT_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
#define LTT_RFLAG_END (LTT_RFLAG_EXTENDED << 1)
#endif /* _LTT_TRACER_H */

View file

@ -1,30 +0,0 @@
/*
* lttng-calibrate.c
*
* Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng probe calibration.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include "ltt-debugfs-abi.h"
#include "ltt-events.h"
noinline
void lttng_calibrate_kretprobe(void)
{
asm volatile ("");
}
int lttng_calibrate(struct lttng_kernel_calibrate *calibrate)
{
switch (calibrate->type) {
case LTTNG_KERNEL_CALIBRATE_KRETPROBE:
lttng_calibrate_kretprobe();
break;
default:
return -EINVAL;
}
return 0;
}

View file

@ -1,68 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng nice context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t nice_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(int));
size += sizeof(int);
return size;
}
static
void nice_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
int nice;
nice = task_nice(current);
lib_ring_buffer_align_ctx(ctx, ltt_alignof(nice));
chan->ops->event_write(ctx, &nice, sizeof(nice));
}
int lttng_add_nice_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "nice")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "nice";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = nice_get_size;
field->record = nice_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Nice Context");

View file

@ -1,271 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng performance monitoring counters (perf-counters) integration module.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "wrapper/perf.h"
#include "ltt-tracer.h"
static
size_t perf_counter_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
size += sizeof(uint64_t);
return size;
}
static
void perf_counter_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
struct perf_event *event;
uint64_t value;
event = field->u.perf_counter->e[ctx->cpu];
if (likely(event)) {
if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
value = 0;
} else {
event->pmu->read(event);
value = local64_read(&event->count);
}
} else {
/*
* Perf chooses not to be clever and not to support enabling a
* perf counter before the cpu is brought up. Therefore, we need
* to support having events coming (e.g. scheduler events)
* before the counter is set up. Write an arbitrary 0 in this
* case.
*/
value = 0;
}
lib_ring_buffer_align_ctx(ctx, ltt_alignof(value));
chan->ops->event_write(ctx, &value, sizeof(value));
}
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
}
#endif
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
struct perf_event **events = field->u.perf_counter->e;
int cpu;
get_online_cpus();
for_each_online_cpu(cpu)
perf_event_release_kernel(events[cpu]);
put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
kfree(field->event_field.name);
kfree(field->u.perf_counter->attr);
kfree(events);
kfree(field->u.perf_counter);
}
#ifdef CONFIG_HOTPLUG_CPU
/**
* lttng_perf_counter_hp_callback - CPU hotplug callback
* @nb: notifier block
* @action: hotplug action to take
* @hcpu: CPU number
*
* Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*
* We can set up perf counters when the cpu is online (CPU_UP_PREPARE seems to be too
* soon).
*/
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long) hcpu;
struct lttng_perf_counter_field *perf_field =
container_of(nb, struct lttng_perf_counter_field, nb);
struct perf_event **events = perf_field->e;
struct perf_event_attr *attr = perf_field->attr;
struct perf_event *pevent;
if (!perf_field->hp_enable)
return NOTIFY_OK;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
pevent = wrapper_perf_event_create_kernel_counter(attr,
cpu, NULL, overflow_callback);
if (!pevent || IS_ERR(pevent))
return NOTIFY_BAD;
if (pevent->state == PERF_EVENT_STATE_ERROR) {
perf_event_release_kernel(pevent);
return NOTIFY_BAD;
}
barrier(); /* Create perf counter before setting event */
events[cpu] = pevent;
break;
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
pevent = events[cpu];
events[cpu] = NULL;
barrier(); /* NULLify event before perf counter teardown */
perf_event_release_kernel(pevent);
break;
}
return NOTIFY_OK;
}
#endif
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
const char *name,
struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
struct lttng_perf_counter_field *perf_field;
struct perf_event **events;
struct perf_event_attr *attr;
int ret;
int cpu;
char *name_alloc;
events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
if (!events)
return -ENOMEM;
attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
if (!attr) {
ret = -ENOMEM;
goto error_attr;
}
attr->type = type;
attr->config = config;
attr->size = sizeof(struct perf_event_attr);
attr->pinned = 1;
attr->disabled = 0;
perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
if (!perf_field) {
ret = -ENOMEM;
goto error_alloc_perf_field;
}
perf_field->e = events;
perf_field->attr = attr;
name_alloc = kstrdup(name, GFP_KERNEL);
if (!name_alloc) {
ret = -ENOMEM;
goto name_alloc_error;
}
field = lttng_append_context(ctx);
if (!field) {
ret = -ENOMEM;
goto append_context_error;
}
if (lttng_find_context(*ctx, name_alloc)) {
ret = -EEXIST;
goto find_error;
}
#ifdef CONFIG_HOTPLUG_CPU
perf_field->nb.notifier_call =
lttng_perf_counter_cpu_hp_callback;
perf_field->nb.priority = 0;
register_cpu_notifier(&perf_field->nb);
#endif
get_online_cpus();
for_each_online_cpu(cpu) {
events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
cpu, NULL, overflow_callback);
if (!events[cpu] || IS_ERR(events[cpu])) {
ret = -EINVAL;
goto counter_error;
}
if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
ret = -EBUSY;
goto counter_busy;
}
}
put_online_cpus();
field->destroy = lttng_destroy_perf_counter_field;
field->event_field.name = name_alloc;
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(uint64_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(uint64_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = perf_counter_get_size;
field->record = perf_counter_record;
field->u.perf_counter = perf_field;
perf_field->hp_enable = 1;
wrapper_vmalloc_sync_all();
return 0;
counter_busy:
counter_error:
for_each_online_cpu(cpu) {
if (events[cpu] && !IS_ERR(events[cpu]))
perf_event_release_kernel(events[cpu]);
}
put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
lttng_remove_context_field(ctx, field);
append_context_error:
kfree(name_alloc);
name_alloc_error:
kfree(perf_field);
error_alloc_perf_field:
kfree(attr);
error_attr:
kfree(events);
return ret;
}
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");

View file

@ -1,68 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng PID context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t pid_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void pid_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
pid_t pid;
pid = task_tgid_nr(current);
lib_ring_buffer_align_ctx(ctx, ltt_alignof(pid));
chan->ops->event_write(ctx, &pid, sizeof(pid));
}
int lttng_add_pid_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "pid")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "pid";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = pid_get_size;
field->record = pid_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit PID Context");

View file

@ -1,71 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng PPID context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t ppid_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void ppid_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
pid_t ppid;
rcu_read_lock();
ppid = task_tgid_nr(current->real_parent);
rcu_read_unlock();
lib_ring_buffer_align_ctx(ctx, ltt_alignof(ppid));
chan->ops->event_write(ctx, &ppid, sizeof(ppid));
}
int lttng_add_ppid_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "ppid")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "ppid";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = ppid_get_size;
field->record = ppid_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit PPID Context");

View file

@ -1,89 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng priority context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "wrapper/kallsyms.h"
#include "ltt-tracer.h"
static
int (*wrapper_task_prio_sym)(struct task_struct *t);
int wrapper_task_prio_init(void)
{
wrapper_task_prio_sym = (void *) kallsyms_lookup_funcptr("task_prio");
if (!wrapper_task_prio_sym) {
printk(KERN_WARNING "LTTng: task_prio symbol lookup failed.\n");
return -EINVAL;
}
return 0;
}
static
size_t prio_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(int));
size += sizeof(int);
return size;
}
static
void prio_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
int prio;
prio = wrapper_task_prio_sym(current);
lib_ring_buffer_align_ctx(ctx, ltt_alignof(prio));
chan->ops->event_write(ctx, &prio, sizeof(prio));
}
int lttng_add_prio_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
int ret;
if (!wrapper_task_prio_sym) {
ret = wrapper_task_prio_init();
if (ret)
return ret;
}
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "prio")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "prio";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(int) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(int) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(int);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = prio_get_size;
field->record = prio_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Priority Context");

View file

@ -1,72 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng procname context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t procname_get_size(size_t offset)
{
size_t size = 0;
size += sizeof(current->comm);
return size;
}
/*
* Racy read of procname. We simply copy the whole comm array.
* Races only with writes to /proc/<task>/procname.
* Otherwise, taking a mutex for each event would be cumbersome and
* could lead to a crash in IRQ context and a deadlock of the lockdep tracer.
*/
static
void procname_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
chan->ops->event_write(ctx, current->comm, sizeof(current->comm));
}
int lttng_add_procname_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "procname")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "procname";
field->event_field.type.atype = atype_array;
field->event_field.type.u.array.elem_type.atype = atype_integer;
field->event_field.type.u.array.elem_type.u.basic.integer.size = sizeof(char) * CHAR_BIT;
field->event_field.type.u.array.elem_type.u.basic.integer.alignment = ltt_alignof(char) * CHAR_BIT;
field->event_field.type.u.array.elem_type.u.basic.integer.signedness = is_signed_type(char);
field->event_field.type.u.array.elem_type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.array.elem_type.u.basic.integer.base = 10;
field->event_field.type.u.array.elem_type.u.basic.integer.encoding = lttng_encode_UTF8;
field->event_field.type.u.array.length = sizeof(current->comm);
field->get_size = procname_get_size;
field->record = procname_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");

View file

@ -1,68 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng TID context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t tid_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void tid_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
pid_t tid;
tid = task_pid_nr(current);
lib_ring_buffer_align_ctx(ctx, ltt_alignof(tid));
chan->ops->event_write(ctx, &tid, sizeof(tid));
}
int lttng_add_tid_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "tid")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "tid";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = tid_get_size;
field->record = tid_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit TID Context");

View file

@ -1,74 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng vPID context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t vpid_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void vpid_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
pid_t vpid;
/*
* nsproxy can be NULL when scheduled out of exit.
*/
if (!current->nsproxy)
vpid = 0;
else
vpid = task_tgid_vnr(current);
lib_ring_buffer_align_ctx(ctx, ltt_alignof(vpid));
chan->ops->event_write(ctx, &vpid, sizeof(vpid));
}
int lttng_add_vpid_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "vpid")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "vpid";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = vpid_get_size;
field->record = vpid_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit vPID Context");

View file

@ -1,79 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng vPPID context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t vppid_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void vppid_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
struct task_struct *parent;
pid_t vppid;
/*
* nsproxy can be NULL when scheduled out of exit.
*/
rcu_read_lock();
parent = rcu_dereference(current->real_parent);
if (!parent->nsproxy)
vppid = 0;
else
vppid = task_tgid_vnr(parent);
rcu_read_unlock();
lib_ring_buffer_align_ctx(ctx, ltt_alignof(vppid));
chan->ops->event_write(ctx, &vppid, sizeof(vppid));
}
int lttng_add_vppid_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "vppid")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "vppid";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = vppid_get_size;
field->record = vppid_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit vPPID Context");

View file

@ -1,74 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng vTID context.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "ltt-events.h"
#include "wrapper/ringbuffer/frontend_types.h"
#include "wrapper/vmalloc.h"
#include "ltt-tracer.h"
static
size_t vtid_get_size(size_t offset)
{
size_t size = 0;
size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
size += sizeof(pid_t);
return size;
}
static
void vtid_record(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx,
struct ltt_channel *chan)
{
pid_t vtid;
/*
* nsproxy can be NULL when scheduled out of exit.
*/
if (!current->nsproxy)
vtid = 0;
else
vtid = task_pid_vnr(current);
lib_ring_buffer_align_ctx(ctx, ltt_alignof(vtid));
chan->ops->event_write(ctx, &vtid, sizeof(vtid));
}
int lttng_add_vtid_to_ctx(struct lttng_ctx **ctx)
{
struct lttng_ctx_field *field;
field = lttng_append_context(ctx);
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, "vtid")) {
lttng_remove_context_field(ctx, field);
return -EEXIST;
}
field->event_field.name = "vtid";
field->event_field.type.atype = atype_integer;
field->event_field.type.u.basic.integer.size = sizeof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.alignment = ltt_alignof(pid_t) * CHAR_BIT;
field->event_field.type.u.basic.integer.signedness = is_signed_type(pid_t);
field->event_field.type.u.basic.integer.reverse_byte_order = 0;
field->event_field.type.u.basic.integer.base = 10;
field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
field->get_size = vtid_get_size;
field->record = vtid_record;
wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit vTID Context");

View file

@ -1,438 +0,0 @@
/*
* lttng-syscalls.c
*
* Copyright 2010-2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng syscall probes.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include "ltt-events.h"
#ifndef CONFIG_COMPAT
static inline int is_compat_task(void)
{
return 0;
}
#endif
static
void syscall_entry_probe(void *__data, struct pt_regs *regs, long id);
/*
* Handle NOARGS, which is not supported by mainline.
*/
#define DECLARE_EVENT_CLASS_NOARGS(name, tstruct, assign, print)
#define DEFINE_EVENT_NOARGS(template, name)
#define TRACE_EVENT_NOARGS(name, struct, assign, print)
/*
* Create LTTng tracepoint probes.
*/
#define LTTNG_PACKAGE_BUILD
#define CREATE_TRACE_POINTS
#define TP_MODULE_OVERRIDE
#define TRACE_INCLUDE_PATH ../instrumentation/syscalls/headers
#define PARAMS(args...) args
#undef TRACE_SYSTEM
/* Hijack probe callback for system calls */
#undef TP_PROBE_CB
#define TP_PROBE_CB(_template) &syscall_entry_probe
#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
TRACE_EVENT(_name, PARAMS(_proto), PARAMS(_args),\
PARAMS(_struct), PARAMS(_assign), PARAMS(_printk))
#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_struct), PARAMS(_assign),\
PARAMS(_printk))
#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
DEFINE_EVENT_NOARGS(_template, _name)
#define TRACE_SYSTEM syscalls_integers
#include "instrumentation/syscalls/headers/syscalls_integers.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM syscalls_pointers
#include "instrumentation/syscalls/headers/syscalls_pointers.h"
#undef TRACE_SYSTEM
#undef SC_TRACE_EVENT
#undef SC_DECLARE_EVENT_CLASS_NOARGS
#undef SC_DEFINE_EVENT_NOARGS
#define TRACE_SYSTEM syscalls_unknown
#include "instrumentation/syscalls/headers/syscalls_unknown.h"
#undef TRACE_SYSTEM
/* For compat syscalls */
#undef _TRACE_SYSCALLS_integers_H
#undef _TRACE_SYSCALLS_pointers_H
/* Hijack probe callback for system calls */
#undef TP_PROBE_CB
#define TP_PROBE_CB(_template) &syscall_entry_probe
#define SC_TRACE_EVENT(_name, _proto, _args, _struct, _assign, _printk) \
TRACE_EVENT(compat_##_name, PARAMS(_proto), PARAMS(_args), \
PARAMS(_struct), PARAMS(_assign), \
PARAMS(_printk))
#define SC_DECLARE_EVENT_CLASS_NOARGS(_name, _struct, _assign, _printk) \
DECLARE_EVENT_CLASS_NOARGS(compat_##_name, PARAMS(_struct), \
PARAMS(_assign), PARAMS(_printk))
#define SC_DEFINE_EVENT_NOARGS(_template, _name) \
DEFINE_EVENT_NOARGS(compat_##_template, compat_##_name)
#define TRACE_SYSTEM compat_syscalls_integers
#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM compat_syscalls_pointers
#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
#undef TRACE_SYSTEM
#undef SC_TRACE_EVENT
#undef SC_DECLARE_EVENT_CLASS_NOARGS
#undef SC_DEFINE_EVENT_NOARGS
#undef TP_PROBE_CB
#undef TP_MODULE_OVERRIDE
#undef LTTNG_PACKAGE_BUILD
#undef CREATE_TRACE_POINTS
struct trace_syscall_entry {
void *func;
const struct lttng_event_desc *desc;
const struct lttng_event_field *fields;
unsigned int nrargs;
};
#define CREATE_SYSCALL_TABLE
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
.func = __event_probe__##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___##_template, \
.desc = &__event_desc___##_name, \
},
static const struct trace_syscall_entry sc_table[] = {
#include "instrumentation/syscalls/headers/syscalls_integers.h"
#include "instrumentation/syscalls/headers/syscalls_pointers.h"
};
#undef TRACE_SYSCALL_TABLE
#define TRACE_SYSCALL_TABLE(_template, _name, _nr, _nrargs) \
[ _nr ] = { \
.func = __event_probe__##compat_##_template, \
.nrargs = (_nrargs), \
.fields = __event_fields___##compat_##_template,\
.desc = &__event_desc___##compat_##_name, \
},
/* Create compatibility syscall table */
const struct trace_syscall_entry compat_sc_table[] = {
#include "instrumentation/syscalls/headers/compat_syscalls_integers.h"
#include "instrumentation/syscalls/headers/compat_syscalls_pointers.h"
};
#undef CREATE_SYSCALL_TABLE
static void syscall_entry_unknown(struct ltt_event *event,
struct pt_regs *regs, unsigned int id)
{
unsigned long args[UNKNOWN_SYSCALL_NRARGS];
syscall_get_arguments(current, regs, 0, UNKNOWN_SYSCALL_NRARGS, args);
if (unlikely(is_compat_task()))
__event_probe__compat_sys_unknown(event, id, args);
else
__event_probe__sys_unknown(event, id, args);
}
void syscall_entry_probe(void *__data, struct pt_regs *regs, long id)
{
struct ltt_channel *chan = __data;
struct ltt_event *event, *unknown_event;
const struct trace_syscall_entry *table, *entry;
size_t table_len;
if (unlikely(is_compat_task())) {
table = compat_sc_table;
table_len = ARRAY_SIZE(compat_sc_table);
unknown_event = chan->sc_compat_unknown;
} else {
table = sc_table;
table_len = ARRAY_SIZE(sc_table);
unknown_event = chan->sc_unknown;
}
if (unlikely(id >= table_len)) {
syscall_entry_unknown(unknown_event, regs, id);
return;
}
if (unlikely(is_compat_task()))
event = chan->compat_sc_table[id];
else
event = chan->sc_table[id];
if (unlikely(!event)) {
syscall_entry_unknown(unknown_event, regs, id);
return;
}
entry = &table[id];
WARN_ON_ONCE(!entry);
switch (entry->nrargs) {
case 0:
{
void (*fptr)(void *__data) = entry->func;
fptr(event);
break;
}
case 1:
{
void (*fptr)(void *__data, unsigned long arg0) = entry->func;
unsigned long args[1];
syscall_get_arguments(current, regs, 0, entry->nrargs, args);
fptr(event, args[0]);
break;
}
case 2:
{
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1) = entry->func;
unsigned long args[2];
syscall_get_arguments(current, regs, 0, entry->nrargs, args);
fptr(event, args[0], args[1]);
break;
}
case 3:
{
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1,
unsigned long arg2) = entry->func;
unsigned long args[3];
syscall_get_arguments(current, regs, 0, entry->nrargs, args);
fptr(event, args[0], args[1], args[2]);
break;
}
case 4:
{
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3) = entry->func;
unsigned long args[4];
syscall_get_arguments(current, regs, 0, entry->nrargs, args);
fptr(event, args[0], args[1], args[2], args[3]);
break;
}
case 5:
{
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4) = entry->func;
unsigned long args[5];
syscall_get_arguments(current, regs, 0, entry->nrargs, args);
fptr(event, args[0], args[1], args[2], args[3], args[4]);
break;
}
case 6:
{
void (*fptr)(void *__data,
unsigned long arg0,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5) = entry->func;
unsigned long args[6];
syscall_get_arguments(current, regs, 0, entry->nrargs, args);
fptr(event, args[0], args[1], args[2],
args[3], args[4], args[5]);
break;
}
default:
break;
}
}
/* noinline to diminish caller stack size */
static
int fill_table(const struct trace_syscall_entry *table, size_t table_len,
struct ltt_event **chan_table, struct ltt_channel *chan, void *filter)
{
const struct lttng_event_desc *desc;
unsigned int i;
/* Allocate events for each syscall, insert into table */
for (i = 0; i < table_len; i++) {
struct lttng_kernel_event ev;
desc = table[i].desc;
if (!desc) {
/* Unknown syscall */
continue;
}
/*
* Skip those already populated by previous failed
* register for this channel.
*/
if (chan_table[i])
continue;
memset(&ev, 0, sizeof(ev));
strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
ev.instrumentation = LTTNG_KERNEL_NOOP;
chan_table[i] = ltt_event_create(chan, &ev, filter,
desc);
if (!chan_table[i]) {
/*
* If something goes wrong in event registration
* after the first one, we have no choice but to
* leave the previous events in there, until
* deleted by session teardown.
*/
return -EINVAL;
}
}
return 0;
}
int lttng_syscalls_register(struct ltt_channel *chan, void *filter)
{
struct lttng_kernel_event ev;
int ret;
wrapper_vmalloc_sync_all();
if (!chan->sc_table) {
/* create syscall table mapping syscall to events */
chan->sc_table = kzalloc(sizeof(struct ltt_event *)
* ARRAY_SIZE(sc_table), GFP_KERNEL);
if (!chan->sc_table)
return -ENOMEM;
}
#ifdef CONFIG_COMPAT
if (!chan->compat_sc_table) {
/* create syscall table mapping compat syscall to events */
chan->compat_sc_table = kzalloc(sizeof(struct ltt_event *)
* ARRAY_SIZE(compat_sc_table), GFP_KERNEL);
if (!chan->compat_sc_table)
return -ENOMEM;
}
#endif
if (!chan->sc_unknown) {
const struct lttng_event_desc *desc =
&__event_desc___sys_unknown;
memset(&ev, 0, sizeof(ev));
strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
ev.instrumentation = LTTNG_KERNEL_NOOP;
chan->sc_unknown = ltt_event_create(chan, &ev, filter,
desc);
if (!chan->sc_unknown) {
return -EINVAL;
}
}
if (!chan->sc_compat_unknown) {
const struct lttng_event_desc *desc =
&__event_desc___compat_sys_unknown;
memset(&ev, 0, sizeof(ev));
strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
ev.instrumentation = LTTNG_KERNEL_NOOP;
chan->sc_compat_unknown = ltt_event_create(chan, &ev, filter,
desc);
if (!chan->sc_compat_unknown) {
return -EINVAL;
}
}
if (!chan->sc_exit) {
const struct lttng_event_desc *desc =
&__event_desc___exit_syscall;
memset(&ev, 0, sizeof(ev));
strncpy(ev.name, desc->name, LTTNG_SYM_NAME_LEN);
ev.name[LTTNG_SYM_NAME_LEN - 1] = '\0';
ev.instrumentation = LTTNG_KERNEL_NOOP;
chan->sc_exit = ltt_event_create(chan, &ev, filter,
desc);
if (!chan->sc_exit) {
return -EINVAL;
}
}
ret = fill_table(sc_table, ARRAY_SIZE(sc_table),
chan->sc_table, chan, filter);
if (ret)
return ret;
#ifdef CONFIG_COMPAT
ret = fill_table(compat_sc_table, ARRAY_SIZE(compat_sc_table),
chan->compat_sc_table, chan, filter);
if (ret)
return ret;
#endif
ret = tracepoint_probe_register("sys_enter",
(void *) syscall_entry_probe, chan);
if (ret)
return ret;
/*
* We change the name of sys_exit tracepoint due to namespace
* conflict with sys_exit syscall entry.
*/
ret = tracepoint_probe_register("sys_exit",
(void *) __event_probe__exit_syscall,
chan->sc_exit);
if (ret) {
WARN_ON_ONCE(tracepoint_probe_unregister("sys_enter",
(void *) syscall_entry_probe, chan));
}
return ret;
}
/*
* Only called at session destruction.
*/
int lttng_syscalls_unregister(struct ltt_channel *chan)
{
int ret;
if (!chan->sc_table)
return 0;
ret = tracepoint_probe_unregister("sys_exit",
(void *) __event_probe__exit_syscall,
chan->sc_exit);
if (ret)
return ret;
ret = tracepoint_probe_unregister("sys_enter",
(void *) syscall_entry_probe, chan);
if (ret)
return ret;
/* ltt_event destroy will be performed by ltt_session_destroy() */
kfree(chan->sc_table);
#ifdef CONFIG_COMPAT
kfree(chan->compat_sc_table);
#endif
return 0;
}

View file

@ -1,37 +0,0 @@
#
# Makefile for the LTT probes.
#
ccflags-y += -I$(PWD)/probes
obj-m += lttng-types.o
obj-m += lttng-probe-lttng.o
obj-m += lttng-probe-sched.o
obj-m += lttng-probe-irq.o
ifneq ($(CONFIG_KVM),)
obj-m += lttng-probe-kvm.o
endif
ifneq ($(CONFIG_BLOCK),)
ifneq ($(CONFIG_EVENT_TRACING),) # need blk_cmd_buf_len
obj-m += $(shell \
if [ $(VERSION) -ge 3 \
-o \( $(VERSION) -eq 2 -a $(PATCHLEVEL) -ge 6 -a $(SUBLEVEL) -ge 38 \) ] ; then \
echo "lttng-probe-block.o" ; fi;)
endif
endif
ifneq ($(CONFIG_KPROBES),)
obj-m += lttng-kprobes.o
endif
ifneq ($(CONFIG_KRETPROBES),)
obj-m += lttng-kretprobes.o
endif
ifneq ($(CONFIG_DYNAMIC_FTRACE),)
obj-m += lttng-ftrace.o
endif

View file

@ -1,132 +0,0 @@
/*
* define_trace.h
*
* Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
* Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
/*
* Trace files that want to automate creation of all tracepoints defined
* in their file should include this file. The following are macros that the
* trace file may define:
*
* TRACE_SYSTEM defines the system the tracepoint is for
*
* TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
* This macro may be defined to tell define_trace.h what file to include.
* Note, leave off the ".h".
*
* TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
* then this macro can define the path to use. Note, the path is relative to
* define_trace.h, not the file including it. Full path names for out of tree
* modules must be used.
*/
#ifdef CREATE_TRACE_POINTS
/* Prevent recursion */
#undef CREATE_TRACE_POINTS
#include <linux/stringify.h>
/*
* module.h includes tracepoints, and because ftrace.h
* pulls in module.h:
* trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
* linux/ftrace.h -> linux/module.h
* we must include module.h here before we play with any of
* the TRACE_EVENT() macros, otherwise the tracepoints included
* by module.h may break the build.
*/
#include <linux/module.h>
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
DEFINE_TRACE(name)
#undef TRACE_EVENT_CONDITION
#define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
TRACE_EVENT(name, \
PARAMS(proto), \
PARAMS(args), \
PARAMS(tstruct), \
PARAMS(assign), \
PARAMS(print))
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
DEFINE_TRACE(name)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_TRACE(name)
#undef DEFINE_EVENT_CONDITION
#define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args) \
DEFINE_TRACE(name)
#undef TRACE_INCLUDE
#undef __TRACE_INCLUDE
#ifndef TRACE_INCLUDE_FILE
# define TRACE_INCLUDE_FILE TRACE_SYSTEM
# define UNDEF_TRACE_INCLUDE_FILE
#endif
#ifndef TRACE_INCLUDE_PATH
# define __TRACE_INCLUDE(system) <trace/events/system.h>
# define UNDEF_TRACE_INCLUDE_PATH
#else
# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
#endif
# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
/* Let the trace headers be reread */
#define TRACE_HEADER_MULTI_READ
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/* Make all open coded DECLARE_TRACE nops */
#undef DECLARE_TRACE
#define DECLARE_TRACE(name, proto, args)
#ifdef LTTNG_PACKAGE_BUILD
#include "lttng-events.h"
#endif
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
#undef TRACE_EVENT_CONDITION
#undef DECLARE_EVENT_CLASS
#undef DEFINE_EVENT
#undef DEFINE_EVENT_PRINT
#undef DEFINE_EVENT_CONDITION
#undef TRACE_HEADER_MULTI_READ
#undef DECLARE_TRACE
/* Only undef what we defined in this file */
#ifdef UNDEF_TRACE_INCLUDE_FILE
# undef TRACE_INCLUDE_FILE
# undef UNDEF_TRACE_INCLUDE_FILE
#endif
#ifdef UNDEF_TRACE_INCLUDE_PATH
# undef TRACE_INCLUDE_PATH
# undef UNDEF_TRACE_INCLUDE_PATH
#endif
/* We may be processing more files */
#define CREATE_TRACE_POINTS
#endif /* CREATE_TRACE_POINTS */
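To see how this header is driven in practice, here is a condensed sketch of a probe source, modelled on the lttng-syscalls.c hunk above; the instrumentation path and the header name hypothetical.h are placeholders, not files from this tree.
/* Illustrative probe source (placeholder names): expand the TRACE_EVENT
 * definitions of one instrumentation header into LTTng probe callbacks
 * and event descriptors. */
#include <linux/module.h>

#define LTTNG_PACKAGE_BUILD
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
#include "../instrumentation/events/lttng-module/hypothetical.h"

MODULE_LICENSE("GPL and additional rights");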

View file

@ -1,84 +0,0 @@
/*
* lttng-events-reset.h
*
* Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
/* Reset macros used within TRACE_EVENT to "nothing" */
#undef __field_full
#define __field_full(_type, _item, _order, _base)
#undef __array_enc_ext
#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)
#undef __dynamic_array_enc_ext
#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)
#undef __dynamic_array_len
#define __dynamic_array_len(_type, _item, _length)
#undef __string
#define __string(_item, _src)
#undef tp_assign
#define tp_assign(dest, src)
#undef tp_memcpy
#define tp_memcpy(dest, src, len)
#undef tp_memcpy_dyn
#define tp_memcpy_dyn(dest, src, len)
#undef tp_strcpy
#define tp_strcpy(dest, src)
#undef __get_str
#define __get_str(field)
#undef __get_dynamic_array
#define __get_dynamic_array(field)
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)
#undef TP_PROTO
#define TP_PROTO(args...)
#undef TP_ARGS
#define TP_ARGS(args...)
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...)
#undef TP_fast_assign
#define TP_fast_assign(args...)
#undef __perf_count
#define __perf_count(args...)
#undef __perf_addr
#define __perf_addr(args...)
#undef TP_perf_assign
#define TP_perf_assign(args...)
#undef TP_printk
#define TP_printk(args...)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print)
#undef DECLARE_EVENT_CLASS_NOARGS
#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print)
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args)
#undef DEFINE_EVENT_NOARGS
#define DEFINE_EVENT_NOARGS(_template, _name)
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)

View file

@ -1,703 +0,0 @@
/*
* lttng-events.h
*
* Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
* Copyright (C) 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/debugfs.h>
#include "lttng.h"
#include "lttng-types.h"
#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
#include "../wrapper/ringbuffer/frontend_types.h"
#include "../ltt-events.h"
#include "../ltt-tracer-core.h"
/*
* Macro declarations used for all stages.
*/
/*
* DECLARE_EVENT_CLASS can be used to add a generic function
* handler for events. That is, if all events have the same
* parameters and just have distinct trace points.
* Each tracepoint can be defined with DEFINE_EVENT and that
* will map the DECLARE_EVENT_CLASS to the tracepoint.
*
* TRACE_EVENT is a one to one mapping between tracepoint and template.
*/
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
DECLARE_EVENT_CLASS(name, \
PARAMS(proto), \
PARAMS(args), \
PARAMS(tstruct), \
PARAMS(assign), \
PARAMS(print)) \
DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))
#undef TRACE_EVENT_NOARGS
#define TRACE_EVENT_NOARGS(name, tstruct, assign, print) \
DECLARE_EVENT_CLASS_NOARGS(name, \
PARAMS(tstruct), \
PARAMS(assign), \
PARAMS(print)) \
DEFINE_EVENT_NOARGS(name, name)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
/* Callbacks are meaningless to LTTng. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct, \
assign, print, reg, unreg) \
TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
/*
* Stage 1 of the trace events.
*
* Create dummy trace calls for each event, verifying that the LTTng module
* TRACE_EVENT headers match the kernel arguments. Will be optimized out by the
* compiler.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
#undef TP_PROTO
#define TP_PROTO(args...) args
#undef TP_ARGS
#define TP_ARGS(args...) args
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args) \
void trace_##_name(_proto);
#undef DEFINE_EVENT_NOARGS
#define DEFINE_EVENT_NOARGS(_template, _name) \
void trace_##_name(void *__data);
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 2 of the trace events.
*
* Create event field type metadata section.
* Each event produces an array of fields.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
/* Named field types must be defined in lttng-types.h */
#undef __field_full
#define __field_full(_type, _item, _order, _base) \
{ \
.name = #_item, \
.type = __type_integer(_type, _order, _base, none), \
},
#undef __field
#define __field(_type, _item) \
__field_full(_type, _item, __BYTE_ORDER, 10)
#undef __field_ext
#define __field_ext(_type, _item, _filter_type) \
__field(_type, _item)
#undef __field_hex
#define __field_hex(_type, _item) \
__field_full(_type, _item, __BYTE_ORDER, 16)
#undef __field_network
#define __field_network(_type, _item) \
__field_full(_type, _item, __BIG_ENDIAN, 10)
#undef __field_network_hex
#define __field_network_hex(_type, _item) \
__field_full(_type, _item, __BIG_ENDIAN, 16)
#undef __array_enc_ext
#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
{ \
.name = #_item, \
.type = \
{ \
.atype = atype_array, \
.u.array = \
{ \
.length = _length, \
.elem_type = __type_integer(_type, _order, _base, _encoding), \
}, \
}, \
},
#undef __array
#define __array(_type, _item, _length) \
__array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
#undef __array_text
#define __array_text(_type, _item, _length) \
__array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
#undef __array_hex
#define __array_hex(_type, _item, _length) \
__array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
#undef __dynamic_array_enc_ext
#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
{ \
.name = #_item, \
.type = \
{ \
.atype = atype_sequence, \
.u.sequence = \
{ \
.length_type = __type_integer(u32, __BYTE_ORDER, 10, none), \
.elem_type = __type_integer(_type, _order, _base, _encoding), \
}, \
}, \
},
#undef __dynamic_array
#define __dynamic_array(_type, _item, _length) \
__dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, none)
#undef __dynamic_array_text
#define __dynamic_array_text(_type, _item, _length) \
__dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 10, UTF8)
#undef __dynamic_array_hex
#define __dynamic_array_hex(_type, _item, _length) \
__dynamic_array_enc_ext(_type, _item, _length, __BYTE_ORDER, 16, none)
#undef __string
#define __string(_item, _src) \
{ \
.name = #_item, \
.type = \
{ \
.atype = atype_string, \
.u.basic.string.encoding = lttng_encode_UTF8, \
}, \
},
#undef __string_from_user
#define __string_from_user(_item, _src) \
__string(_item, _src)
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args /* Only one used in this phase */
#undef DECLARE_EVENT_CLASS_NOARGS
#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
static const struct lttng_event_field __event_fields___##_name[] = { \
_tstruct \
};
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
DECLARE_EVENT_CLASS_NOARGS(_name, PARAMS(_tstruct), PARAMS(_assign), \
PARAMS(_print))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 3 of the trace events.
*
* Create probe callback prototypes.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
#undef TP_PROTO
#define TP_PROTO(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static void __event_probe__##_name(void *__data, _proto);
#undef DECLARE_EVENT_CLASS_NOARGS
#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
static void __event_probe__##_name(void *__data);
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 3.9 of the trace events.
*
* Create event descriptions.
*/
/* Named field types must be defined in lttng-types.h */
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
#ifndef TP_PROBE_CB
#define TP_PROBE_CB(_template) &__event_probe__##_template
#endif
#undef DEFINE_EVENT_NOARGS
#define DEFINE_EVENT_NOARGS(_template, _name) \
static const struct lttng_event_desc __event_desc___##_name = { \
.fields = __event_fields___##_template, \
.name = #_name, \
.probe_callback = (void *) TP_PROBE_CB(_template), \
.nr_fields = ARRAY_SIZE(__event_fields___##_template), \
.owner = THIS_MODULE, \
};
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args) \
DEFINE_EVENT_NOARGS(_template, _name)
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 4 of the trace events.
*
* Create an array of event description pointers.
*/
/* Named field types must be defined in lttng-types.h */
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
#undef DEFINE_EVENT_NOARGS
#define DEFINE_EVENT_NOARGS(_template, _name) \
&__event_desc___##_name,
#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args) \
DEFINE_EVENT_NOARGS(_template, _name)
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};
#undef TP_ID1
#undef TP_ID
/*
* Stage 5 of the trace events.
*
* Create a toplevel descriptor for the whole probe.
*/
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
/* non-const because list head will be modified when registered. */
static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
.event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
.nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
};
#undef TP_ID1
#undef TP_ID
/*
* Stage 6 of the trace events.
*
* Create static inline function that calculates event size.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
/* Named field types must be defined in lttng-types.h */
#undef __field_full
#define __field_full(_type, _item, _order, _base) \
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
__event_len += sizeof(_type);
#undef __array_enc_ext
#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
__event_len += sizeof(_type) * (_length);
#undef __dynamic_array_enc_ext
#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(u32)); \
__event_len += sizeof(u32); \
__event_len += lib_ring_buffer_align(__event_len, ltt_alignof(_type)); \
__dynamic_len[__dynamic_len_idx] = (_length); \
__event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
__dynamic_len_idx++;
#undef __string
#define __string(_item, _src) \
__event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;
/*
* strlen_user includes \0. If it returns 0, it faulted, so we set size to
* 1 (\0 only).
*/
#undef __string_from_user
#define __string_from_user(_item, _src) \
__event_len += __dynamic_len[__dynamic_len_idx++] = \
max_t(size_t, strlen_user(_src), 1);
#undef TP_PROTO
#define TP_PROTO(args...) args
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \
{ \
size_t __event_len = 0; \
unsigned int __dynamic_len_idx = 0; \
\
if (0) \
(void) __dynamic_len_idx; /* don't warn if unused */ \
_tstruct \
return __event_len; \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 7 of the trace events.
*
* Create static inline function that calculates event payload alignment.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
/* Named field types must be defined in lttng-types.h */
#undef __field_full
#define __field_full(_type, _item, _order, _base) \
__event_align = max_t(size_t, __event_align, ltt_alignof(_type));
#undef __array_enc_ext
#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
__event_align = max_t(size_t, __event_align, ltt_alignof(_type));
#undef __dynamic_array_enc_ext
#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
__event_align = max_t(size_t, __event_align, ltt_alignof(u32)); \
__event_align = max_t(size_t, __event_align, ltt_alignof(_type));
#undef __string
#define __string(_item, _src)
#undef __string_from_user
#define __string_from_user(_item, _src)
#undef TP_PROTO
#define TP_PROTO(args...) args
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static inline size_t __event_get_align__##_name(_proto) \
{ \
size_t __event_align = 1; \
_tstruct \
return __event_align; \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 8 of the trace events.
*
* Create structure declaration that allows the "assign" macros to access the
* field types.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
/* Named field types must be defined in lttng-types.h */
#undef __field_full
#define __field_full(_type, _item, _order, _base) _type _item;
#undef __array_enc_ext
#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding) \
_type _item;
#undef __dynamic_array_enc_ext
#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
_type _item;
#undef __string
#define __string(_item, _src) char _item;
#undef __string_from_user
#define __string_from_user(_item, _src) \
__string(_item, _src)
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
struct __event_typemap__##_name { \
_tstruct \
};
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 9 of the trace events.
*
* Create the probe function: call the event size calculation and write event data
* into the buffer.
*
* We use both the field and assignment macros to write the fields in the order
* defined in the field declaration. The field declarations control the
* execution order, jumping to the appropriate assignment block.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
#undef __field_full
#define __field_full(_type, _item, _order, _base) \
goto __assign_##_item; \
__end_field_##_item:
#undef __array_enc_ext
#define __array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
goto __assign_##_item; \
__end_field_##_item:
#undef __dynamic_array_enc_ext
#define __dynamic_array_enc_ext(_type, _item, _length, _order, _base, _encoding)\
goto __assign_##_item##_1; \
__end_field_##_item##_1: \
goto __assign_##_item##_2; \
__end_field_##_item##_2:
#undef __string
#define __string(_item, _src) \
goto __assign_##_item; \
__end_field_##_item:
#undef __string_from_user
#define __string_from_user(_item, _src) \
__string(_item, _src)
/*
* Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to
* strcpy().
*/
#undef tp_assign
#define tp_assign(dest, src) \
__assign_##dest: \
{ \
__typeof__(__typemap.dest) __tmp = (src); \
lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__tmp)); \
__chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
} \
goto __end_field_##dest;
#undef tp_memcpy
#define tp_memcpy(dest, src, len) \
__assign_##dest: \
if (0) \
(void) __typemap.dest; \
lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
__chan->ops->event_write(&__ctx, src, len); \
goto __end_field_##dest;
#undef tp_memcpy_dyn
#define tp_memcpy_dyn(dest, src) \
__assign_##dest##_1: \
{ \
u32 __tmpl = __dynamic_len[__dynamic_len_idx]; \
lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(u32)); \
__chan->ops->event_write(&__ctx, &__tmpl, sizeof(u32)); \
} \
goto __end_field_##dest##_1; \
__assign_##dest##_2: \
lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
__chan->ops->event_write(&__ctx, src, \
sizeof(__typemap.dest) * __get_dynamic_array_len(dest));\
goto __end_field_##dest##_2;
#undef tp_memcpy_from_user
#define tp_memcpy_from_user(dest, src, len) \
__assign_##dest: \
if (0) \
(void) __typemap.dest; \
lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest)); \
__chan->ops->event_write_from_user(&__ctx, src, len); \
goto __end_field_##dest;
/*
* The string length including the final \0.
*/
#undef tp_copy_string_from_user
#define tp_copy_string_from_user(dest, src) \
__assign_##dest: \
{ \
size_t __ustrlen; \
\
if (0) \
(void) __typemap.dest; \
lib_ring_buffer_align_ctx(&__ctx, ltt_alignof(__typemap.dest));\
__ustrlen = __get_dynamic_array_len(dest); \
if (likely(__ustrlen > 1)) { \
__chan->ops->event_write_from_user(&__ctx, src, \
__ustrlen - 1); \
} \
__chan->ops->event_memset(&__ctx, 0, 1); \
} \
goto __end_field_##dest;
#undef tp_strcpy
#define tp_strcpy(dest, src) \
tp_memcpy(dest, src, __get_dynamic_array_len(dest))
/* Named field types must be defined in lttng-types.h */
#undef __get_str
#define __get_str(field) field
#undef __get_dynamic_array
#define __get_dynamic_array(field) field
/* Beware: this get len actually consumes the len value */
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) __dynamic_len[__dynamic_len_idx++]
#undef TP_PROTO
#define TP_PROTO(args...) args
#undef TP_ARGS
#define TP_ARGS(args...) args
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
#undef TP_fast_assign
#define TP_fast_assign(args...) args
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static void __event_probe__##_name(void *__data, _proto) \
{ \
struct ltt_event *__event = __data; \
struct ltt_channel *__chan = __event->chan; \
struct lib_ring_buffer_ctx __ctx; \
size_t __event_len, __event_align; \
size_t __dynamic_len_idx = 0; \
size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
struct __event_typemap__##_name __typemap; \
int __ret; \
\
if (0) \
(void) __dynamic_len_idx; /* don't warn if unused */ \
if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
return; \
if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
return; \
if (unlikely(!ACCESS_ONCE(__event->enabled))) \
return; \
__event_len = __event_get_size__##_name(__dynamic_len, _args); \
__event_align = __event_get_align__##_name(_args); \
lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
__event_align, -1); \
__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
if (__ret < 0) \
return; \
/* Control code (field ordering) */ \
_tstruct \
__chan->ops->event_commit(&__ctx); \
return; \
/* Copy code, steered by control code */ \
_assign \
}
#undef DECLARE_EVENT_CLASS_NOARGS
#define DECLARE_EVENT_CLASS_NOARGS(_name, _tstruct, _assign, _print) \
static void __event_probe__##_name(void *__data) \
{ \
struct ltt_event *__event = __data; \
struct ltt_channel *__chan = __event->chan; \
struct lib_ring_buffer_ctx __ctx; \
size_t __event_len, __event_align; \
int __ret; \
\
if (unlikely(!ACCESS_ONCE(__chan->session->active))) \
return; \
if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
return; \
if (unlikely(!ACCESS_ONCE(__event->enabled))) \
return; \
__event_len = 0; \
__event_align = 1; \
lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
__event_align, -1); \
__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
if (__ret < 0) \
return; \
/* Control code (field ordering) */ \
_tstruct \
__chan->ops->event_commit(&__ctx); \
return; \
/* Copy code, steered by control code */ \
_assign \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* Stage 10 of the trace events.
*
* Register/unregister probes at module load/unload.
*/
#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
#define module_init_eval1(_token, _system) module_init(_token##_system)
#define module_init_eval(_token, _system) module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system) module_exit(_token##_system)
#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
#ifndef TP_MODULE_OVERRIDE
static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
wrapper_vmalloc_sync_all();
return ltt_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}
module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
{
ltt_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}
module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
#endif
#undef module_init_eval
#undef module_exit_eval
#undef TP_ID1
#undef TP_ID
#undef TP_PROTO
#undef TP_ARGS
#undef TRACE_EVENT_FLAGS
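For a hypothetical event declared with a single __field(int, foo) and tp_assign(foo, foo), stages 8 and 9 above generate code of roughly this shape (a heavily simplified sketch, not a literal macro expansion; reservation and commit are elided into comments):
/* Stage 8: typemap structure giving the assign macros the field types. */
struct __event_typemap__my_event {
	int foo;
};

/* Stage 9: probe body; control code jumps into the copy code and back. */
static void __event_probe__my_event(void *__data, int foo)
{
	/* ... enabled checks, size/alignment computation, event_reserve ... */
	/* Control code (field ordering), emitted by __field_full(): */
	goto __assign_foo;
__end_field_foo:
	/* ... event_commit ... */
	return;
	/* Copy code, steered by the control code, emitted by tp_assign(): */
__assign_foo:
	{
		int __tmp = (foo);
		/* align the ctx, then event_write(&__ctx, &__tmp, sizeof(__tmp)) */
	}
	goto __end_field_foo;
}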

View file

@ -1,188 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng function tracer integration module.
*
* Dual LGPL v2.1/GPL v2 license.
*/
/*
* Ftrace function tracer does not seem to provide synchronization between probe
* teardown and callback execution. Therefore, we make this module permanently
* loaded (it cannot be unloaded).
*
* TODO: Move to register_ftrace_function() (which is exported for
* modules) for Linux >= 3.0. It is faster (only enables the selected
* functions), and will stay there.
*/
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include "../ltt-events.h"
#include "../wrapper/ringbuffer/frontend_types.h"
#include "../wrapper/ftrace.h"
#include "../wrapper/vmalloc.h"
#include "../ltt-tracer.h"
static
void lttng_ftrace_handler(unsigned long ip, unsigned long parent_ip, void **data)
{
struct ltt_event *event = *data;
struct ltt_channel *chan = event->chan;
struct lib_ring_buffer_ctx ctx;
struct {
unsigned long ip;
unsigned long parent_ip;
} payload;
int ret;
if (unlikely(!ACCESS_ONCE(chan->session->active)))
return;
if (unlikely(!ACCESS_ONCE(chan->enabled)))
return;
if (unlikely(!ACCESS_ONCE(event->enabled)))
return;
lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
sizeof(payload), ltt_alignof(payload), -1);
ret = chan->ops->event_reserve(&ctx, event->id);
if (ret < 0)
return;
payload.ip = ip;
payload.parent_ip = parent_ip;
lib_ring_buffer_align_ctx(&ctx, ltt_alignof(payload));
chan->ops->event_write(&ctx, &payload, sizeof(payload));
chan->ops->event_commit(&ctx);
return;
}
/*
* Create event description
*/
static
int lttng_create_ftrace_event(const char *name, struct ltt_event *event)
{
struct lttng_event_field *fields;
struct lttng_event_desc *desc;
int ret;
desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->name = kstrdup(name, GFP_KERNEL);
if (!desc->name) {
ret = -ENOMEM;
goto error_str;
}
desc->nr_fields = 2;
desc->fields = fields =
kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
if (!desc->fields) {
ret = -ENOMEM;
goto error_fields;
}
fields[0].name = "ip";
fields[0].type.atype = atype_integer;
fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
fields[0].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
fields[0].type.u.basic.integer.reverse_byte_order = 0;
fields[0].type.u.basic.integer.base = 16;
fields[0].type.u.basic.integer.encoding = lttng_encode_none;
fields[1].name = "parent_ip";
fields[1].type.atype = atype_integer;
fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
fields[1].type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
fields[1].type.u.basic.integer.reverse_byte_order = 0;
fields[1].type.u.basic.integer.base = 16;
fields[1].type.u.basic.integer.encoding = lttng_encode_none;
desc->owner = THIS_MODULE;
event->desc = desc;
return 0;
error_fields:
kfree(desc->name);
error_str:
kfree(desc);
return ret;
}
static
struct ftrace_probe_ops lttng_ftrace_ops = {
.func = lttng_ftrace_handler,
};
int lttng_ftrace_register(const char *name,
const char *symbol_name,
struct ltt_event *event)
{
int ret;
ret = lttng_create_ftrace_event(name, event);
if (ret)
goto error;
event->u.ftrace.symbol_name = kstrdup(symbol_name, GFP_KERNEL);
if (!event->u.ftrace.symbol_name)
goto name_error;
/* Ensure the memory we just allocated doesn't trigger page faults */
wrapper_vmalloc_sync_all();
ret = wrapper_register_ftrace_function_probe(event->u.ftrace.symbol_name,
&lttng_ftrace_ops, event);
if (ret < 0)
goto register_error;
return 0;
register_error:
kfree(event->u.ftrace.symbol_name);
name_error:
kfree(event->desc->name);
kfree(event->desc);
error:
return ret;
}
EXPORT_SYMBOL_GPL(lttng_ftrace_register);
void lttng_ftrace_unregister(struct ltt_event *event)
{
wrapper_unregister_ftrace_function_probe(event->u.ftrace.symbol_name,
&lttng_ftrace_ops, event);
}
EXPORT_SYMBOL_GPL(lttng_ftrace_unregister);
void lttng_ftrace_destroy_private(struct ltt_event *event)
{
kfree(event->u.ftrace.symbol_name);
kfree(event->desc->fields);
kfree(event->desc->name);
kfree(event->desc);
}
EXPORT_SYMBOL_GPL(lttng_ftrace_destroy_private);
int lttng_ftrace_init(void)
{
wrapper_vmalloc_sync_all();
return 0;
}
module_init(lttng_ftrace_init)
/*
* Ftrace takes care of waiting for a grace period (RCU sched) at probe
* unregistration, and disables preemption around probe call.
*/
void lttng_ftrace_exit(void)
{
}
module_exit(lttng_ftrace_exit)
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Ftrace Support");

View file

@ -1,164 +0,0 @@
/*
* (C) Copyright 2009-2011 -
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* LTTng kprobes integration module.
*
* Dual LGPL v2.1/GPL v2 license.
*/
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include "../ltt-events.h"
#include "../wrapper/ringbuffer/frontend_types.h"
#include "../wrapper/vmalloc.h"
#include "../ltt-tracer.h"
static
int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
struct ltt_event *event =
container_of(p, struct ltt_event, u.kprobe.kp);
struct ltt_channel *chan = event->chan;
struct lib_ring_buffer_ctx ctx;
int ret;
unsigned long data = (unsigned long) p->addr;
if (unlikely(!ACCESS_ONCE(chan->session->active)))
return 0;
if (unlikely(!ACCESS_ONCE(chan->enabled)))
return 0;
if (unlikely(!ACCESS_ONCE(event->enabled)))
return 0;
lib_ring_buffer_ctx_init(&ctx, chan->chan, event, sizeof(data),
ltt_alignof(data), -1);
ret = chan->ops->event_reserve(&ctx, event->id);
if (ret < 0)
return 0;
lib_ring_buffer_align_ctx(&ctx, ltt_alignof(data));
chan->ops->event_write(&ctx, &data, sizeof(data));
chan->ops->event_commit(&ctx);
return 0;
}
/*
* Create event description
*/
static
int lttng_create_kprobe_event(const char *name, struct ltt_event *event)
{
struct lttng_event_field *field;
struct lttng_event_desc *desc;
int ret;
desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->name = kstrdup(name, GFP_KERNEL);
if (!desc->name) {
ret = -ENOMEM;
goto error_str;
}
desc->nr_fields = 1;
desc->fields = field =
kzalloc(1 * sizeof(struct lttng_event_field), GFP_KERNEL);
if (!field) {
ret = -ENOMEM;
goto error_field;
}
field->name = "ip";
field->type.atype = atype_integer;
field->type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
field->type.u.basic.integer.alignment = ltt_alignof(unsigned long) * CHAR_BIT;
field->type.u.basic.integer.signedness = is_signed_type(unsigned long);
field->type.u.basic.integer.reverse_byte_order = 0;
field->type.u.basic.integer.base = 16;
field->type.u.basic.integer.encoding = lttng_encode_none;
desc->owner = THIS_MODULE;
event->desc = desc;
return 0;
error_field:
kfree(desc->name);
error_str:
kfree(desc);
return ret;
}
int lttng_kprobes_register(const char *name,
const char *symbol_name,
uint64_t offset,
uint64_t addr,
struct ltt_event *event)
{
int ret;
/* Kprobes expects a NULL symbol name if unused */
if (symbol_name[0] == '\0')
symbol_name = NULL;
ret = lttng_create_kprobe_event(name, event);
if (ret)
goto error;
memset(&event->u.kprobe.kp, 0, sizeof(event->u.kprobe.kp));
event->u.kprobe.kp.pre_handler = lttng_kprobes_handler_pre;
if (symbol_name) {
event->u.kprobe.symbol_name =
kzalloc(LTTNG_SYM_NAME_LEN * sizeof(char),
GFP_KERNEL);
if (!event->u.kprobe.symbol_name) {
ret = -ENOMEM;
goto name_error;
}
memcpy(event->u.kprobe.symbol_name, symbol_name,
LTTNG_SYM_NAME_LEN * sizeof(char));
event->u.kprobe.kp.symbol_name =
event->u.kprobe.symbol_name;
}
event->u.kprobe.kp.offset = offset;
event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
/*
* Ensure the memory we just allocated doesn't trigger page faults.
* Well.. kprobes itself puts the page fault handler on the blacklist,
* but we can never be too careful.
*/
wrapper_vmalloc_sync_all();
ret = register_kprobe(&event->u.kprobe.kp);
if (ret)
goto register_error;
return 0;
register_error:
kfree(event->u.kprobe.symbol_name);
name_error:
kfree(event->desc->fields);
kfree(event->desc->name);
kfree(event->desc);
error:
return ret;
}
EXPORT_SYMBOL_GPL(lttng_kprobes_register);
void lttng_kprobes_unregister(struct ltt_event *event)
{
unregister_kprobe(&event->u.kprobe.kp);
}
EXPORT_SYMBOL_GPL(lttng_kprobes_unregister);
void lttng_kprobes_destroy_private(struct ltt_event *event)
{
kfree(event->u.kprobe.symbol_name);
kfree(event->desc->fields);
kfree(event->desc->name);
kfree(event->desc);
}
EXPORT_SYMBOL_GPL(lttng_kprobes_destroy_private);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support");

Some files were not shown because too many files have changed in this diff.