Merge tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"This patch series contains several clean ups and even a new trace
clock "monitonic raw". Also some enhancements to make the ring buffer
even faster. But the biggest and most noticeable change is the
renaming of the ftrace* files, structures and variables that have to
deal with trace events.
Over the years I've had several developers tell me about their
confusion with what ftrace is compared to events. Technically,
"ftrace" is the infrastructure to do the function hooks, which include
tracing and also helps with live kernel patching. But the trace
events are a separate entity altogether, and the files that affect the
trace events should not be named "ftrace". These include:
include/trace/ftrace.h -> include/trace/trace_events.h
include/linux/ftrace_event.h -> include/linux/trace_events.h
Functions that are specific to trace events have also been renamed:
ftrace_print_*() -> trace_print_*()
(un)register_ftrace_event() -> (un)register_trace_event()
ftrace_event_name() -> trace_event_name()
ftrace_trigger_soft_disabled() -> trace_trigger_soft_disabled()
ftrace_define_fields_##call() -> trace_define_fields_##call()
ftrace_get_offsets_##call() -> trace_get_offsets_##call()
Structures have been renamed:
ftrace_event_file -> trace_event_file
ftrace_event_{call,class} -> trace_event_{call,class}
ftrace_event_buffer -> trace_event_buffer
ftrace_subsystem_dir -> trace_subsystem_dir
ftrace_event_raw_##call -> trace_event_raw_##call
ftrace_event_data_offset_##call-> trace_event_data_offset_##call
ftrace_event_type_funcs_##call -> trace_event_type_funcs_##call
And a few various variables and flags have also been updated.
This has been sitting in linux-next for some time, and I have not
heard a single complaint about this rename breaking anything. Mostly
because these functions, variables and structures are mostly internal
to the tracing system and are seldom (if ever) used by anything
external to that"
* tag 'trace-v4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (33 commits)
ring_buffer: Allow to exit the ring buffer benchmark immediately
ring-buffer-benchmark: Fix the wrong type
ring-buffer-benchmark: Fix the wrong param in module_param
ring-buffer: Add enum names for the context levels
ring-buffer: Remove useless unused tracing_off_permanent()
ring-buffer: Give NMIs a chance to lock the reader_lock
ring-buffer: Add trace_recursive checks to ring_buffer_write()
ring-buffer: Allways do the trace_recursive checks
ring-buffer: Move recursive check to per_cpu descriptor
ring-buffer: Add unlikelys to make fast path the default
tracing: Rename ftrace_get_offsets_##call() to trace_event_get_offsets_##call()
tracing: Rename ftrace_define_fields_##call() to trace_event_define_fields_##call()
tracing: Rename ftrace_event_type_funcs_##call to trace_event_type_funcs_##call
tracing: Rename ftrace_data_offset_##call to trace_event_data_offset_##call
tracing: Rename ftrace_raw_##call event structures to trace_event_raw_##call
tracing: Rename ftrace_trigger_soft_disabled() to trace_trigger_soft_disabled()
tracing: Rename FTRACE_EVENT_FL_* flags to EVENT_FILE_FL_*
tracing: Rename struct ftrace_subsystem_dir to trace_subsystem_dir
tracing: Rename ftrace_event_name() to trace_event_name()
tracing: Rename FTRACE_MAX_EVENT to TRACE_EVENT_TYPE_MAX
...
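For out-of-tree code the rename is mechanical. A minimal sketch of registering a
custom output format under the new names (the event itself is hypothetical; only the
renamed types and calls come from this series):

    #include <linux/trace_events.h>	/* was <linux/ftrace_event.h> */

    static enum print_line_t my_event_trace(struct trace_iterator *iter,
                                            int flags, struct trace_event *event)
    {
            trace_seq_puts(&iter->seq, "my_event\n");	/* render one entry */
            return trace_handle_return(&iter->seq);
    }

    static struct trace_event_functions my_event_funcs = {
            .trace	= my_event_trace,
    };

    static struct trace_event my_event = {
            .funcs	= &my_event_funcs,	/* .type left 0: have one assigned */
    };

    static int __init my_event_init(void)
    {
            /* was register_ftrace_event() before this series */
            if (!register_trace_event(&my_event))
                    return -ENODEV;		/* 0 means no free type id */
            return 0;
    }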
@@ -2,7 +2,7 @@
 #define _TRACE_KVMMMU_H
 
 #include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvmmmu
@@ -29,7 +29,7 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/slab.h>
 
 #include <asm/perf_event.h>
@@ -28,7 +28,7 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/mod_devicetable.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/hrtimer.h>
@@ -533,12 +533,6 @@ bool mac_pton(const char *s, u8 *mac);
  *
  * Most likely, you want to use tracing_on/tracing_off.
  */
-#ifdef CONFIG_RING_BUFFER
-/* trace_off_permanent stops recording with no way to bring it back */
-void tracing_off_permanent(void);
-#else
-static inline void tracing_off_permanent(void) { }
-#endif
 
 enum ftrace_dump_mode {
 	DUMP_NONE,
@@ -336,7 +336,7 @@ struct module {
 	const char **trace_bprintk_fmt_start;
 #endif
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call **trace_events;
+	struct trace_event_call **trace_events;
 	unsigned int num_trace_events;
 	struct trace_enum_map **trace_enums;
 	unsigned int num_trace_enums;
@@ -484,7 +484,7 @@ struct perf_event {
 	void *overflow_handler_context;
 
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call *tp_event;
+	struct trace_event_call *tp_event;
 	struct event_filter *filter;
 #ifdef CONFIG_FUNCTION_TRACER
 	struct ftrace_ops ftrace_ops;
@@ -111,14 +111,14 @@ union bpf_attr;
 #define __SC_STR_ADECL(t, a)	#a
 #define __SC_STR_TDECL(t, a)	#t
 
-extern struct ftrace_event_class event_class_syscall_enter;
-extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_class event_class_syscall_enter;
+extern struct trace_event_class event_class_syscall_exit;
 extern struct trace_event_functions enter_syscall_print_funcs;
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
 	static struct syscall_metadata __syscall_meta_##sname;		\
-	static struct ftrace_event_call __used				\
+	static struct trace_event_call __used				\
 	  event_enter_##sname = {					\
 		.class			= &event_class_syscall_enter,	\
 		{							\
@@ -128,13 +128,13 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.data			= (void *)&__syscall_meta_##sname,\
 		.flags			= TRACE_EVENT_FL_CAP_ANY,	\
 	};								\
-	static struct ftrace_event_call __used				\
+	static struct trace_event_call __used				\
 	  __attribute__((section("_ftrace_events")))			\
 	 *__event_enter_##sname = &event_enter_##sname;
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
 	static struct syscall_metadata __syscall_meta_##sname;		\
-	static struct ftrace_event_call __used				\
+	static struct trace_event_call __used				\
 	  event_exit_##sname = {					\
 		.class			= &event_class_syscall_exit,	\
 		{							\
@@ -144,7 +144,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.data			= (void *)&__syscall_meta_##sname,\
 		.flags			= TRACE_EVENT_FL_CAP_ANY,	\
 	};								\
-	static struct ftrace_event_call __used				\
+	static struct trace_event_call __used				\
 	  __attribute__((section("_ftrace_events")))			\
 	 *__event_exit_##sname = &event_exit_##sname;
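These macros place a pointer to each event into the "_ftrace_events" linker section, so
the core can walk all built-in events as one array. A simplified sketch of the mechanism
(the walker function is illustrative; the start/stop symbols are emitted by the kernel's
linker script):

    /* Each event contributes one pointer to the section: */
    static struct trace_event_call __used
    __attribute__((section("_ftrace_events"))) *__event_foo = &event_foo;

    /* The linker script brackets the section with these symbols: */
    extern struct trace_event_call *__start_ftrace_events[];
    extern struct trace_event_call *__stop_ftrace_events[];

    static void walk_builtin_events(void)
    {
            struct trace_event_call **call;

            for (call = __start_ftrace_events; call < __stop_ftrace_events; call++) {
                    /* initialize or register (*call) here */
            }
    }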
@@ -1,6 +1,6 @@
 
-#ifndef _LINUX_FTRACE_EVENT_H
-#define _LINUX_FTRACE_EVENT_H
+#ifndef _LINUX_TRACE_EVENT_H
+#define _LINUX_TRACE_EVENT_H
 
 #include <linux/ring_buffer.h>
 #include <linux/trace_seq.h>
@@ -25,35 +25,35 @@ struct trace_print_flags_u64 {
 	const char		*name;
 };
 
-const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
-				   unsigned long flags,
-				   const struct trace_print_flags *flag_array);
+const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
+				  unsigned long flags,
+				  const struct trace_print_flags *flag_array);
 
-const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
-				     const struct trace_print_flags *symbol_array);
+const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+				    const struct trace_print_flags *symbol_array);
 
 #if BITS_PER_LONG == 32
-const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
-					 unsigned long long val,
-					 const struct trace_print_flags_u64
+const char *trace_print_symbols_seq_u64(struct trace_seq *p,
+					unsigned long long val,
+					const struct trace_print_flags_u64
 					 *symbol_array);
 #endif
 
-const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
-				     unsigned int bitmask_size);
+const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+				    unsigned int bitmask_size);
 
-const char *ftrace_print_hex_seq(struct trace_seq *p,
-				 const unsigned char *buf, int len);
+const char *trace_print_hex_seq(struct trace_seq *p,
+				const unsigned char *buf, int len);
 
-const char *ftrace_print_array_seq(struct trace_seq *p,
+const char *trace_print_array_seq(struct trace_seq *p,
 				   const void *buf, int count,
 				   size_t el_size);
 
 struct trace_iterator;
 struct trace_event;
 
-int ftrace_raw_output_prep(struct trace_iterator *iter,
-			   struct trace_event *event);
+int trace_raw_output_prep(struct trace_iterator *iter,
+			  struct trace_event *event);
 
 /*
  * The trace entry - the most basic unit of tracing. This is what
@@ -68,7 +68,7 @@ struct trace_entry {
 	int			pid;
 };
 
-#define FTRACE_MAX_EVENT						\
+#define TRACE_EVENT_TYPE_MAX						\
 	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
 
 /*
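Since the type field of struct trace_entry is an unsigned short, the renamed macro
still works out to (1 << 16) - 1 = 65535, the largest event type id that fits in an
entry header. A compile-time restatement of that arithmetic (illustrative only):

    #include <linux/trace_events.h>

    static inline void trace_event_type_max_check(void)
    {
            /* 2-byte type field, so 1 << (2 * 8) == 65536 possible ids */
            BUILD_BUG_ON(TRACE_EVENT_TYPE_MAX != 65535);
    }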
@@ -132,8 +132,8 @@ struct trace_event {
 	struct trace_event_functions	*funcs;
 };
 
-extern int register_ftrace_event(struct trace_event *event);
-extern int unregister_ftrace_event(struct trace_event *event);
+extern int register_trace_event(struct trace_event *event);
+extern int unregister_trace_event(struct trace_event *event);
 
 /* Return values for print_line callback */
 enum print_line_t {
@@ -157,11 +157,11 @@ static inline enum print_line_t trace_handle_return(struct trace_seq *s)
 void tracing_generic_entry_update(struct trace_entry *entry,
 				  unsigned long flags,
 				  int pc);
-struct ftrace_event_file;
+struct trace_event_file;
 
 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
-				struct ftrace_event_file *ftrace_file,
+				struct trace_event_file *trace_file,
 				int type, unsigned long len,
 				unsigned long flags, int pc);
 struct ring_buffer_event *
@@ -183,7 +183,7 @@ void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 
 void tracing_record_cmdline(struct task_struct *tsk);
 
-int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
 
 struct event_filter;
 
@@ -200,50 +200,39 @@ enum trace_reg {
 #endif
 };
 
-struct ftrace_event_call;
+struct trace_event_call;
 
-struct ftrace_event_class {
+struct trace_event_class {
 	const char		*system;
 	void			*probe;
 #ifdef CONFIG_PERF_EVENTS
 	void			*perf_probe;
 #endif
-	int			(*reg)(struct ftrace_event_call *event,
+	int			(*reg)(struct trace_event_call *event,
 				       enum trace_reg type, void *data);
-	int			(*define_fields)(struct ftrace_event_call *);
-	struct list_head	*(*get_fields)(struct ftrace_event_call *);
+	int			(*define_fields)(struct trace_event_call *);
+	struct list_head	*(*get_fields)(struct trace_event_call *);
 	struct list_head	fields;
-	int			(*raw_init)(struct ftrace_event_call *);
+	int			(*raw_init)(struct trace_event_call *);
 };
 
-extern int ftrace_event_reg(struct ftrace_event_call *event,
+extern int trace_event_reg(struct trace_event_call *event,
 			    enum trace_reg type, void *data);
 
-int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
-			char *fmt, ...);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
-			      char *type, int len, char *item, int offset,
-			      int field_size, int sign, int filter);
-
-struct ftrace_event_buffer {
+struct trace_event_buffer {
 	struct ring_buffer		*buffer;
 	struct ring_buffer_event	*event;
-	struct ftrace_event_file	*ftrace_file;
+	struct trace_event_file		*trace_file;
 	void				*entry;
 	unsigned long			flags;
 	int				pc;
 };
 
-void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
-				  struct ftrace_event_file *ftrace_file,
+void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
+				 struct trace_event_file *trace_file,
 				  unsigned long len);
 
-void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
-
-int ftrace_event_define_field(struct ftrace_event_call *call,
-			      char *type, int len, char *item, int offset,
-			      int field_size, int sign, int filter);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
 
 enum {
 	TRACE_EVENT_FL_FILTERED_BIT,
@@ -261,11 +250,11 @@ enum {
  *  FILTERED	  - The event has a filter attached
  *  CAP_ANY	  - Any user can enable for perf
  *  NO_SET_FILTER - Set when filter has error and is to be ignored
- *  IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  WAS_ENABLED   - Set and stays set when an event was ever enabled
 *		    (used for module unloading, if a module event is enabled,
 *		     it is best to clear the buffers that used it).
- *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ *  USE_CALL_FILTER - For trace internal events, don't use file filter
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 */
@@ -280,9 +269,9 @@ enum {
 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
 };
 
-struct ftrace_event_call {
+struct trace_event_call {
 	struct list_head	list;
-	struct ftrace_event_class *class;
+	struct trace_event_class *class;
 	union {
 		char			*name;
 		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
@@ -297,7 +286,7 @@ struct trace_event_call {
 	 *   bit 0:		filter_active
 	 *   bit 1:		allow trace by non root (cap any)
 	 *   bit 2:		failed to apply filter
-	 *   bit 3:		ftrace internal event (do not enable)
+	 *   bit 3:		trace internal event (do not enable)
 	 *   bit 4:		Event was enabled by module
 	 *   bit 5:		use call filter rather than file filter
 	 *   bit 6:		Event is a tracepoint
@@ -309,13 +298,13 @@ struct trace_event_call {
 	struct hlist_head __percpu	*perf_events;
 	struct bpf_prog			*prog;
 
-	int	(*perf_perm)(struct ftrace_event_call *,
+	int	(*perf_perm)(struct trace_event_call *,
 			     struct perf_event *);
 #endif
 };
 
 static inline const char *
-ftrace_event_name(struct ftrace_event_call *call)
+trace_event_name(struct trace_event_call *call)
 {
 	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
 		return call->tp ? call->tp->name : NULL;
@@ -324,21 +313,21 @@ trace_event_name(struct trace_event_call *call)
 }
 
 struct trace_array;
-struct ftrace_subsystem_dir;
+struct trace_subsystem_dir;
 
 enum {
-	FTRACE_EVENT_FL_ENABLED_BIT,
-	FTRACE_EVENT_FL_RECORDED_CMD_BIT,
-	FTRACE_EVENT_FL_FILTERED_BIT,
-	FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
-	FTRACE_EVENT_FL_SOFT_MODE_BIT,
-	FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
-	FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
-	FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+	EVENT_FILE_FL_ENABLED_BIT,
+	EVENT_FILE_FL_RECORDED_CMD_BIT,
+	EVENT_FILE_FL_FILTERED_BIT,
+	EVENT_FILE_FL_NO_SET_FILTER_BIT,
+	EVENT_FILE_FL_SOFT_MODE_BIT,
+	EVENT_FILE_FL_SOFT_DISABLED_BIT,
+	EVENT_FILE_FL_TRIGGER_MODE_BIT,
+	EVENT_FILE_FL_TRIGGER_COND_BIT,
 };
 
 /*
- * Ftrace event file flags:
+ * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
@@ -350,23 +339,23 @@ enum {
 *  TRIGGER_COND  - When set, one or more triggers has an associated filter
 */
 enum {
-	FTRACE_EVENT_FL_ENABLED		= (1 << FTRACE_EVENT_FL_ENABLED_BIT),
-	FTRACE_EVENT_FL_RECORDED_CMD	= (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
-	FTRACE_EVENT_FL_FILTERED	= (1 << FTRACE_EVENT_FL_FILTERED_BIT),
-	FTRACE_EVENT_FL_NO_SET_FILTER	= (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
-	FTRACE_EVENT_FL_SOFT_MODE	= (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
-	FTRACE_EVENT_FL_SOFT_DISABLED	= (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
-	FTRACE_EVENT_FL_TRIGGER_MODE	= (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
-	FTRACE_EVENT_FL_TRIGGER_COND	= (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
+	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
+	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
+	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
+	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
+	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
+	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
 };
 
-struct ftrace_event_file {
+struct trace_event_file {
 	struct list_head		list;
-	struct ftrace_event_call	*event_call;
+	struct trace_event_call		*event_call;
 	struct event_filter		*filter;
 	struct dentry			*dir;
 	struct trace_array		*tr;
-	struct ftrace_subsystem_dir	*system;
+	struct trace_subsystem_dir	*system;
 	struct list_head		triggers;
 
 	/*
@@ -399,7 +388,7 @@ struct trace_event_file {
 	early_initcall(trace_init_flags_##name);
 
 #define __TRACE_EVENT_PERF_PERM(name, expr...)				\
-	static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+	static int perf_perm_##name(struct trace_event_call *tp_event, \
 				    struct perf_event *p_event)		\
 	{								\
 		return ({ expr; });					\
@@ -425,19 +414,19 @@ enum event_trigger_type {
 
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
-extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+extern int filter_check_discard(struct trace_event_file *file, void *rec,
 				struct ring_buffer *buffer,
 				struct ring_buffer_event *event);
-extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event);
-extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
 						   void *rec);
-extern void event_triggers_post_call(struct ftrace_event_file *file,
+extern void event_triggers_post_call(struct trace_event_file *file,
 				     enum event_trigger_type tt);
 
 /**
- * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
@@ -446,14 +435,14 @@ extern void event_triggers_post_call(struct trace_event_file *file,
 * otherwise false.
 */
 static inline bool
-ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+trace_trigger_soft_disabled(struct trace_event_file *file)
 {
 	unsigned long eflags = file->flags;
 
-	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
-		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 			event_triggers_call(file, NULL);
-		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 			return true;
 	}
 	return false;
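In practice the renamed helper is the first thing an event probe calls; schematically
(names hypothetical, the real expansion appears in the event macros further down):

    static void my_probe(void *__data /* , tracepoint args... */)
    {
            struct trace_event_file *trace_file = __data;

            /*
             * Fire any unconditional triggers, and bail out early if the
             * event is soft disabled (enabled only so its triggers run).
             */
            if (trace_trigger_soft_disabled(trace_file))
                    return;

            /* ... reserve, fill and commit the ring buffer entry ... */
    }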
@@ -473,7 +462,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
 * Returns true if the event is discarded, false otherwise.
 */
 static inline bool
-__event_trigger_test_discard(struct ftrace_event_file *file,
+__event_trigger_test_discard(struct trace_event_file *file,
 			     struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
 			     void *entry,
@@ -481,10 +470,10 @@ __event_trigger_test_discard(struct trace_event_file *file,
 {
 	unsigned long eflags = file->flags;
 
-	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 		*tt = event_triggers_call(file, entry);
 
-	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags))
 		ring_buffer_discard_commit(buffer, event);
 	else if (!filter_check_discard(file, entry, buffer, event))
 		return false;
@@ -506,7 +495,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
 * if the event is soft disabled and should be discarded.
 */
 static inline void
-event_trigger_unlock_commit(struct ftrace_event_file *file,
+event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct ring_buffer *buffer,
 			    struct ring_buffer_event *event,
 			    void *entry, unsigned long irq_flags, int pc)
@@ -537,7 +526,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
 static inline void
-event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+event_trigger_unlock_commit_regs(struct trace_event_file *file,
 				 struct ring_buffer *buffer,
 				 struct ring_buffer_event *event,
 				 void *entry, unsigned long irq_flags, int pc,
@@ -570,12 +559,12 @@ enum {
 	FILTER_TRACE_FN,
 };
 
-extern int trace_event_raw_init(struct ftrace_event_call *call);
-extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+extern int trace_event_raw_init(struct trace_event_call *call);
+extern int trace_define_field(struct trace_event_call *call, const char *type,
 			      const char *name, int offset, int size,
 			      int is_signed, int filter_type);
-extern int trace_add_event_call(struct ftrace_event_call *call);
-extern int trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_add_event_call(struct trace_event_call *call);
+extern int trace_remove_event_call(struct trace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
@@ -624,4 +613,4 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
 }
 #endif
 
-#endif /* _LINUX_FTRACE_EVENT_H */
+#endif /* _LINUX_TRACE_EVENT_H */
@@ -87,7 +87,8 @@
 #define DECLARE_TRACE(name, proto, args)
 
 #ifdef CONFIG_EVENT_TRACING
-#include <trace/ftrace.h>
+#include <trace/trace_events.h>
+#include <trace/perf.h>
 #endif
 
 #undef TRACE_EVENT
@@ -7,7 +7,7 @@
 #include <linux/ktime.h>
 #include <linux/pm_qos.h>
 #include <linux/tracepoint.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #define TPS(x)  tracepoint_string(x)
@@ -0,0 +1,350 @@
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct trace_event_call event_<call>;
+ *
+ * static void trace_event_raw_event_<call>(void *__data, proto)
+ * {
+ *	struct trace_event_file *trace_file = __data;
+ *	struct trace_event_call *event_call = trace_file->event_call;
+ *	struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
+ *	unsigned long eflags = trace_file->flags;
+ *	enum event_trigger_type __tt = ETT_NONE;
+ *	struct ring_buffer_event *event;
+ *	struct trace_event_raw_<call> *entry; <-- defined in stage 1
+ *	struct ring_buffer *buffer;
+ *	unsigned long irq_flags;
+ *	int __data_size;
+ *	int pc;
+ *
+ *	if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
+ *		if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ *			event_triggers_call(trace_file, NULL);
+ *		if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ *			return;
+ *	}
+ *
+ *	local_save_flags(irq_flags);
+ *	pc = preempt_count();
+ *
+ *	__data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
+ *
+ *	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
+ *				  event_<call>->event.type,
+ *				  sizeof(*entry) + __data_size,
+ *				  irq_flags, pc);
+ *	if (!event)
+ *		return;
+ *	entry = ring_buffer_event_data(event);
+ *
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
+ *			   __array macros.
+ *
+ *	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
+ *		__tt = event_triggers_call(trace_file, entry);
+ *
+ *	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
+ *		     &trace_file->flags))
+ *		ring_buffer_discard_commit(buffer, event);
+ *	else if (!filter_check_discard(trace_file, entry, buffer, event))
+ *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *
+ *	if (__tt)
+ *		event_triggers_post_call(trace_file, __tt);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ *	.trace			= trace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static char print_fmt_<call>[] = <TP_printk>;
+ *
+ * static struct trace_event_class __used event_class_<template> = {
+ *	.system			= "<system>",
+ *	.define_fields		= trace_event_define_fields_<call>,
+ *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
+ *	.raw_init		= trace_event_raw_init,
+ *	.probe			= trace_event_raw_event_##call,
+ *	.reg			= trace_event_reg,
+ * };
+ *
+ * static struct trace_event_call event_<call> = {
+ *	.class			= event_class_<template>,
+ *	{
+ *		.tp			= &__tracepoint_<call>,
+ *	},
+ *	.event			= &ftrace_event_type_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
+ * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct trace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
+ *
+ */
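The walkthrough above is what a plain event definition expands into. For reference, a
minimal definition that feeds these macros looks like this (event and field names are
hypothetical):

    TRACE_EVENT(my_event,

            TP_PROTO(int value),

            TP_ARGS(value),

            TP_STRUCT__entry(
                    __field(int, value)
            ),

            TP_fast_assign(
                    __entry->value = value;
            ),

            TP_printk("value=%d", __entry->value)
    );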
+#ifdef CONFIG_PERF_EVENTS
+
+#define _TRACE_PERF_PROTO(call, proto)					\
+	static notrace void						\
+	perf_trace_##call(void *__data, proto);
+
+#define _TRACE_PERF_INIT(call)						\
+	.perf_probe		= perf_trace_##call,
+
+#else
+#define _TRACE_PERF_PROTO(call, proto)
+#define _TRACE_PERF_INIT(call)
+#endif /* CONFIG_PERF_EVENTS */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len)				\
+	__entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __assign_str
+#define __assign_str(dst, src)						\
+	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __assign_bitmask
+#define __assign_bitmask(dst, src, nr_bits)				\
+	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a)	(a)
+
+#undef __perf_count
+#define __perf_count(c)	(c)
+
+#undef __perf_task
+#define __perf_task(t)	(t)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+									\
+static notrace void							\
+trace_event_raw_event_##call(void *__data, proto)			\
+{									\
+	struct trace_event_file *trace_file = __data;			\
+	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+	struct trace_event_buffer fbuffer;				\
+	struct trace_event_raw_##call *entry;				\
+	int __data_size;						\
+									\
+	if (trace_trigger_soft_disabled(trace_file))			\
+		return;							\
+									\
+	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+									\
+	entry = trace_event_buffer_reserve(&fbuffer, trace_file,	\
+				 sizeof(*entry) + __data_size);		\
+									\
+	if (!entry)							\
+		return;							\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	trace_event_buffer_commit(&fbuffer);				\
+}
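Open-coded, the reserve/commit pairing this macro generates reduces to the following
shape (a sketch; my_entry stands in for the generated trace_event_raw_<call> type, and
the macro plumbing is elided):

    struct trace_event_buffer fbuffer;
    struct my_entry *entry;

    entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
    if (!entry)
            return;
    /* fill in *entry: the tstruct/assign steps */
    trace_event_buffer_commit(&fbuffer);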
+/*
+ * The ftrace_test_probe is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the ftrace probe will
+ * fail to compile unless it too is updated.
+ */
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)			\
+static inline void ftrace_test_probe_##call(void)			\
+{									\
+	check_trace_callback_type_##call(trace_event_raw_event_##template); \
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __print_array
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+_TRACE_PERF_PROTO(call, PARAMS(proto));					\
+static char print_fmt_##call[] = print;					\
+static struct trace_event_class __used __refdata event_class_##call = { \
+	.system			= TRACE_SYSTEM_STRING,			\
+	.define_fields		= trace_event_define_fields_##call,	\
+	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
+	.raw_init		= trace_event_raw_init,			\
+	.probe			= trace_event_raw_event_##call,		\
+	.reg			= trace_event_reg,			\
+	_TRACE_PERF_INIT(call)						\
+};
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)			\
+									\
+static struct trace_event_call __used event_##call = {			\
+	.class			= &event_class_##template,		\
+	{								\
+		.tp			= &__tracepoint_##call,		\
+	},								\
+	.event.funcs		= &trace_event_type_funcs_##template,	\
+	.print_fmt		= print_fmt_##template,			\
+	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
+};									\
+static struct trace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
+									\
+static char print_fmt_##call[] = print;					\
+									\
+static struct trace_event_call __used event_##call = {			\
+	.class			= &event_class_##template,		\
+	{								\
+		.tp			= &__tracepoint_##call,		\
+	},								\
+	.event.funcs		= &trace_event_type_funcs_##call,	\
+	.print_fmt		= print_fmt_##call,			\
+	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
+};									\
+static struct trace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TRACE_SYSTEM_VAR
+
+#ifdef CONFIG_PERF_EVENTS
+
+#undef __entry
+#define __entry entry
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field)	\
+		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
+
+#undef __get_dynamic_array_len
+#define __get_dynamic_array_len(field)	\
+		((__entry->__data_loc_##field >> 16) & 0xffff)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __get_bitmask
+#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+
+#undef __perf_addr
+#define __perf_addr(a)	(__addr = (a))
+
+#undef __perf_count
+#define __perf_count(c)	(__count = (c))
+
+#undef __perf_task
+#define __perf_task(t)	(__task = (t))
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
+static notrace void							\
+perf_trace_##call(void *__data, proto)					\
+{									\
+	struct trace_event_call *event_call = __data;			\
+	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+	struct trace_event_raw_##call *entry;				\
+	struct pt_regs *__regs;						\
+	u64 __addr = 0, __count = 1;					\
+	struct task_struct *__task = NULL;				\
+	struct hlist_head *head;					\
+	int __entry_size;						\
+	int __data_size;						\
+	int rctx;							\
+									\
+	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+									\
+	head = this_cpu_ptr(event_call->perf_events);			\
+	if (__builtin_constant_p(!__task) && !__task &&			\
+	    hlist_empty(head))						\
+		return;							\
+									\
+	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
+			     sizeof(u64));				\
+	__entry_size -= sizeof(u32);					\
+									\
+	entry = perf_trace_buf_prepare(__entry_size,			\
+			event_call->event.type, &__regs, &rctx);	\
+	if (!entry)							\
+		return;							\
+									\
+	perf_fetch_caller_regs(__regs);					\
+									\
+	tstruct								\
+									\
+	{ assign; }							\
+									\
+	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
+		__count, __regs, head, __task);				\
+}
+
+/*
+ * This part is compiled out, it is only here as a build time check
+ * to make sure that if the tracepoint handling changes, the
+ * perf probe will fail to compile unless it too is updated.
+ */
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args)			\
+static inline void perf_test_probe_##call(void)			\
+{									\
+	check_trace_callback_type_##call(perf_trace_##template);	\
+}
+
+#undef DEFINE_EVENT_PRINT
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
+	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+#endif /* CONFIG_PERF_EVENTS */
@@ -3,7 +3,7 @@
 
 #include <linux/tracepoint.h>
 #include <linux/unistd.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
@@ -29,8 +29,8 @@ struct syscall_metadata {
 	const char	**args;
 	struct list_head enter_fields;
 
-	struct ftrace_event_call *enter_event;
-	struct ftrace_event_call *exit_event;
+	struct trace_event_call *enter_event;
+	struct trace_event_call *exit_event;
 };
 
 #if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)

[One file's diff was suppressed because it is too large.]
@@ -36,7 +36,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/cgroup.h>
 #include <linux/perf_event.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/mm_types.h>
 #include <linux/module.h>
@@ -18,7 +18,7 @@
 */
 #include <linux/export.h>
 #include <linux/moduleloader.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/file.h>
@@ -35,7 +35,7 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 
 #include "rcu.h"
 
@@ -54,7 +54,7 @@
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 #include <linux/random.h>
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/suspend.h>
 
 #include "tree.h"
@@ -1448,14 +1448,14 @@ static struct trace_event trace_blk_event = {
 
 static int __init init_blk_tracer(void)
 {
-	if (!register_ftrace_event(&trace_blk_event)) {
+	if (!register_trace_event(&trace_blk_event)) {
 		pr_warning("Warning: could not register block events\n");
 		return 1;
 	}
 
 	if (register_tracer(&blk_tracer) != 0) {
 		pr_warning("Warning: could not register the block tracer\n");
-		unregister_ftrace_event(&trace_blk_event);
+		unregister_trace_event(&trace_blk_event);
 		return 1;
 	}
@@ -3,7 +3,7 @@
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
-#include <linux/ftrace_event.h>
+#include <linux/trace_events.h>
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
 #include <linux/trace_seq.h>
@@ -115,63 +115,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
 *
 */
 
-/*
- * A fast way to enable or disable all ring buffers is to
- * call tracing_on or tracing_off. Turning off the ring buffers
- * prevents all ring buffers from being recorded to.
- * Turning this switch on, makes it OK to write to the
- * ring buffer, if the ring buffer is enabled itself.
- *
- * There's three layers that must be on in order to write
- * to the ring buffer.
- *
- * 1) This global flag must be set.
- * 2) The ring buffer must be enabled for recording.
- * 3) The per cpu buffer must be enabled for recording.
- *
- * In case of an anomaly, this global flag has a bit set that
- * will permantly disable all ring buffers.
- */
-
-/*
- * Global flag to disable all recording to ring buffers
- *  This has two bits: ON, DISABLED
- *
- *   ON   DISABLED
- * ---- ----------
- *   0      0        : ring buffers are off
- *   1      0        : ring buffers are on
- *   X      1        : ring buffers are permanently disabled
- */
-
-enum {
-	RB_BUFFERS_ON_BIT	= 0,
-	RB_BUFFERS_DISABLED_BIT	= 1,
-};
-
-enum {
-	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
-	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
-};
-
-static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
-
 /* Used for individual buffers (after the counter) */
 #define RB_BUFFER_OFF		(1 << 20)
 
 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 
-/**
- * tracing_off_permanent - permanently disable ring buffers
- *
- * This function, once called, will disable all ring buffers
- * permanently.
- */
-void tracing_off_permanent(void)
-{
-	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
-}
-
 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 #define RB_ALIGNMENT		4U
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
@@ -451,6 +399,23 @@ struct rb_irq_work {
 	bool				wakeup_full;
 };
 
+/*
+ * Used for which event context the event is in.
+ *  NMI     = 0
+ *  IRQ     = 1
+ *  SOFTIRQ = 2
+ *  NORMAL  = 3
+ *
+ * See trace_recursive_lock() comment below for more details.
+ */
+enum {
+	RB_CTX_NMI,
+	RB_CTX_IRQ,
+	RB_CTX_SOFTIRQ,
+	RB_CTX_NORMAL,
+	RB_CTX_MAX
+};
+
 /*
 * head_page == tail_page && head == tail then buffer is empty.
 */
@@ -462,6 +427,7 @@ struct ring_buffer_per_cpu {
 	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	unsigned int			nr_pages;
+	unsigned int			current_context;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
 	struct buffer_page		*tail_page;	/* write to tail */
@@ -2224,7 +2190,7 @@ static unsigned rb_calculate_event_length(unsigned length)
 
 	/* zero length can cause confusions */
 	if (!length)
-		length = 1;
+		length++;
 
 	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
@@ -2636,8 +2602,6 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 	return NULL;
 }
 
-#ifdef CONFIG_TRACING
-
 /*
 * The lock and unlock are done within a preempt disable section.
 * The current_context per_cpu variable can only be modified
@@ -2675,44 +2639,38 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 * just so happens that it is the same bit corresponding to
 * the current context.
 */
-static DEFINE_PER_CPU(unsigned int, current_context);
 
-static __always_inline int trace_recursive_lock(void)
+static __always_inline int
+trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	unsigned int val = __this_cpu_read(current_context);
+	unsigned int val = cpu_buffer->current_context;
 	int bit;
 
 	if (in_interrupt()) {
 		if (in_nmi())
-			bit = 0;
+			bit = RB_CTX_NMI;
 		else if (in_irq())
-			bit = 1;
+			bit = RB_CTX_IRQ;
 		else
-			bit = 2;
+			bit = RB_CTX_SOFTIRQ;
 	} else
-		bit = 3;
+		bit = RB_CTX_NORMAL;
 
 	if (unlikely(val & (1 << bit)))
 		return 1;
 
 	val |= (1 << bit);
-	__this_cpu_write(current_context, val);
+	cpu_buffer->current_context = val;
 
 	return 0;
 }
 
-static __always_inline void trace_recursive_unlock(void)
+static __always_inline void
+trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	__this_cpu_and(current_context, __this_cpu_read(current_context) - 1);
+	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
 }
 
-#else
-
-#define trace_recursive_lock()		(0)
-#define trace_recursive_unlock()	do { } while (0)
-
-#endif
-
 /**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
@@ -2735,41 +2693,37 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	struct ring_buffer_event *event;
 	int cpu;
 
-	if (ring_buffer_flags != RB_BUFFERS_ON)
-		return NULL;
-
 	/* If we are tracing schedule, we don't want to recurse */
 	preempt_disable_notrace();
 
-	if (atomic_read(&buffer->record_disabled))
-		goto out_nocheck;
-
-	if (trace_recursive_lock())
-		goto out_nocheck;
+	if (unlikely(atomic_read(&buffer->record_disabled)))
+		goto out;
 
 	cpu = raw_smp_processor_id();
 
-	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
 
-	if (atomic_read(&cpu_buffer->record_disabled))
+	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
 		goto out;
 
-	if (length > BUF_MAX_DATA_SIZE)
+	if (unlikely(length > BUF_MAX_DATA_SIZE))
 		goto out;
 
+	if (unlikely(trace_recursive_lock(cpu_buffer)))
+		goto out;
+
 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
-		goto out;
+		goto out_unlock;
 
 	return event;
 
+ out_unlock:
+	trace_recursive_unlock(cpu_buffer);
+
 out:
-	trace_recursive_unlock();
-
- out_nocheck:
 	preempt_enable_notrace();
 	return NULL;
 }
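The recursion protection above leans on a small bit trick: trace_recursive_unlock()
clears the most recently taken context bit with val & (val - 1). Worked through with
illustrative values:

    /*
     * A NORMAL-context write (bit 3) interrupted by an IRQ write (bit 1):
     *
     *   after both locks:        val == 0b1010
     *   IRQ side unlocks first:  0b1010 & 0b1001 == 0b1000  (bit 1 cleared)
     *   NORMAL side unlocks:     0b1000 & 0b0111 == 0b0000  (bit 3 cleared)
     *
     * val & (val - 1) always clears the lowest set bit, and because an
     * interrupting context always occupies a lower bit (RB_CTX_NMI <
     * RB_CTX_IRQ < RB_CTX_SOFTIRQ < RB_CTX_NORMAL), the lowest set bit
     * is always the most recently taken one.
     */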
@@ -2859,7 +2813,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	rb_wakeups(buffer, cpu_buffer);
 
-	trace_recursive_unlock();
+	trace_recursive_unlock(cpu_buffer);
 
 	preempt_enable_notrace();
 
@@ -2970,7 +2924,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 out:
 	rb_end_commit(cpu_buffer);
 
-	trace_recursive_unlock();
+	trace_recursive_unlock(cpu_buffer);
 
 	preempt_enable_notrace();
 
@@ -3000,9 +2954,6 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	int ret = -EBUSY;
 	int cpu;
 
-	if (ring_buffer_flags != RB_BUFFERS_ON)
-		return -EBUSY;
-
 	preempt_disable_notrace();
 
 	if (atomic_read(&buffer->record_disabled))
@@ -3021,9 +2972,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	if (length > BUF_MAX_DATA_SIZE)
 		goto out;
 
+	if (unlikely(trace_recursive_lock(cpu_buffer)))
+		goto out;
+
 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
 	if (!event)
-		goto out;
+		goto out_unlock;
 
 	body = rb_event_data(event);
 
@@ -3034,6 +2988,10 @@ int ring_buffer_write(struct ring_buffer *buffer,
 	rb_wakeups(buffer, cpu_buffer);
 
 	ret = 0;
+
+ out_unlock:
+	trace_recursive_unlock(cpu_buffer);
+
 out:
 	preempt_enable_notrace();
 
@@ -3860,19 +3818,36 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
-static inline int rb_ok_to_lock(void)
+static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
+	if (likely(!in_nmi())) {
+		raw_spin_lock(&cpu_buffer->reader_lock);
+		return true;
+	}
+
 	/*
 	 * If an NMI die dumps out the content of the ring buffer
-	 * do not grab locks. We also permanently disable the ring
-	 * buffer too. A one time deal is all you get from reading
-	 * the ring buffer from an NMI.
+	 * trylock must be used to prevent a deadlock if the NMI
+	 * preempted a task that holds the ring buffer locks. If
+	 * we get the lock then all is fine, if not, then continue
+	 * to do the read, but this can corrupt the ring buffer,
+	 * so it must be permanently disabled from future writes.
+	 * Reading from NMI is a oneshot deal.
 	 */
-	if (likely(!in_nmi()))
-		return 1;
+	if (raw_spin_trylock(&cpu_buffer->reader_lock))
+		return true;
 
-	tracing_off_permanent();
-	return 0;
+	/* Continue without locking, but disable the ring buffer */
+	atomic_inc(&cpu_buffer->record_disabled);
+	return false;
+}
+
+static inline void
+rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
+{
+	if (likely(locked))
+		raw_spin_unlock(&cpu_buffer->reader_lock);
+	return;
 }
 
 /**
@@ -3892,21 +3867,18 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
-	int dolock;
+	bool dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
-	dolock = rb_ok_to_lock();
 again:
 	local_irq_save(flags);
-	if (dolock)
-		raw_spin_lock(&cpu_buffer->reader_lock);
+	dolock = rb_reader_lock(cpu_buffer);
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
-	if (dolock)
-		raw_spin_unlock(&cpu_buffer->reader_lock);
+	rb_reader_unlock(cpu_buffer, dolock);
 	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
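With the two helpers, every reader-side site follows one pattern; condensed (assuming
cpu_buffer has already been looked up):

    unsigned long flags;
    bool dolock;

    local_irq_save(flags);
    dolock = rb_reader_lock(cpu_buffer);	/* trylock-only from NMI */
    /* ... peek or consume from cpu_buffer ... */
    rb_reader_unlock(cpu_buffer, dolock);	/* unlock only if we locked */
    local_irq_restore(flags);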
@@ -3959,9 +3931,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
-	int dolock;
-
-	dolock = rb_ok_to_lock();
+	bool dolock;
 
 again:
 	/* might be called in atomic */
@@ -3972,8 +3942,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
-	if (dolock)
-		raw_spin_lock(&cpu_buffer->reader_lock);
+	dolock = rb_reader_lock(cpu_buffer);
 
 	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event) {
@@ -3981,8 +3950,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
 		rb_advance_reader(cpu_buffer);
 	}
 
-	if (dolock)
-		raw_spin_unlock(&cpu_buffer->reader_lock);
+	rb_reader_unlock(cpu_buffer, dolock);
 	local_irq_restore(flags);
 
 out:
@@ -4263,21 +4231,17 @@ int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
-	int dolock;
+	bool dolock;
 	int cpu;
 	int ret;
 
-	dolock = rb_ok_to_lock();
-
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
 		local_irq_save(flags);
-		if (dolock)
-			raw_spin_lock(&cpu_buffer->reader_lock);
+		dolock = rb_reader_lock(cpu_buffer);
 		ret = rb_per_cpu_empty(cpu_buffer);
-		if (dolock)
-			raw_spin_unlock(&cpu_buffer->reader_lock);
+		rb_reader_unlock(cpu_buffer, dolock);
 		local_irq_restore(flags);
 
 		if (!ret)
@@ -4297,21 +4261,17 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned long flags;
-	int dolock;
+	bool dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
-	dolock = rb_ok_to_lock();
-
 	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
-	if (dolock)
-		raw_spin_lock(&cpu_buffer->reader_lock);
+	dolock = rb_reader_lock(cpu_buffer);
 	ret = rb_per_cpu_empty(cpu_buffer);
-	if (dolock)
-		raw_spin_unlock(&cpu_buffer->reader_lock);
+	rb_reader_unlock(cpu_buffer, dolock);
 	local_irq_restore(flags);
 
 	return ret;
@@ -4349,9 +4309,6 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
 
 	ret = -EAGAIN;
 
-	if (ring_buffer_flags != RB_BUFFERS_ON)
-		goto out;
-
 	if (atomic_read(&buffer_a->record_disabled))
 		goto out;
 
@@ -32,11 +32,11 @@ static struct task_struct *producer;
 static struct task_struct *consumer;
 static unsigned long read;
 
-static int disable_reader;
+static unsigned int disable_reader;
 module_param(disable_reader, uint, 0644);
 MODULE_PARM_DESC(disable_reader, "only run producer");
 
-static int write_iteration = 50;
+static unsigned int write_iteration = 50;
 module_param(write_iteration, uint, 0644);
 MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
 
@@ -46,16 +46,16 @@ static int consumer_nice = MAX_NICE;
 static int producer_fifo = -1;
 static int consumer_fifo = -1;
 
-module_param(producer_nice, uint, 0644);
+module_param(producer_nice, int, 0644);
 MODULE_PARM_DESC(producer_nice, "nice prio for producer");
 
-module_param(consumer_nice, uint, 0644);
+module_param(consumer_nice, int, 0644);
 MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
 
-module_param(producer_fifo, uint, 0644);
+module_param(producer_fifo, int, 0644);
 MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
 
-module_param(consumer_fifo, uint, 0644);
+module_param(consumer_fifo, int, 0644);
 MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
 
 static int read_events;
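The type fixes matter because module_param()'s second argument selects the sysfs
read/write handlers, and it has to agree with the variable's C type; otherwise a
negative value such as the -1 fifo default does not round-trip correctly. The corrected
pairing, in a hypothetical module:

    #include <linux/moduleparam.h>

    static int my_fifo = -1;		/* may hold a negative sentinel */
    module_param(my_fifo, int, 0644);	/* "int" matches the declaration */

    static unsigned int my_iterations = 50;	/* never negative */
    module_param(my_iterations, uint, 0644);	/* so "uint" is correct here */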
@@ -263,6 +263,8 @@ static void ring_buffer_producer(void)
 		if (cnt % wakeup_interval)
 			cond_resched();
 #endif
+		if (kthread_should_stop())
+			kill_test = 1;
 
 	} while (ktime_before(end_time, timeout) && !kill_test);
 	trace_printk("End ring buffer hammer\n");
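The added check follows the kthread contract: a kernel thread must keep running (or at
least keep polling) until kthread_stop() is called on it, and kthread_should_stop() is
how it notices. The canonical loop shape (a generic sketch, not the benchmark's exact
code):

    #include <linux/kthread.h>

    static int my_worker(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* one unit of work per pass */
                    cond_resched();
            }
            return 0;	/* return only after kthread_stop() was requested */
    }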
@@ -285,7 +287,7 @@ static void ring_buffer_producer(void)
 	entries = ring_buffer_entries(buffer);
 	overruns = ring_buffer_overruns(buffer);
 
-	if (kill_test)
+	if (kill_test && !kthread_should_stop())
 		trace_printk("ERROR!\n");
 
 	if (!disable_reader) {
@@ -379,7 +381,7 @@ static int ring_buffer_consumer_thread(void *arg)
 	}
 	__set_current_state(TASK_RUNNING);
 
-	if (kill_test)
+	if (!kthread_should_stop())
 		wait_to_die();
 
 	return 0;
@@ -399,13 +401,16 @@ static int ring_buffer_producer_thread(void *arg)
 	}
 
 	ring_buffer_producer();
+	if (kill_test)
+		goto out_kill;
 
 	trace_printk("Sleeping for 10 secs\n");
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(HZ * SLEEP_TIME);
 	}
 
-	if (kill_test)
+ out_kill:
+	if (!kthread_should_stop())
 		wait_to_die();
 
 	return 0;
[Some files were not shown because too many files have changed in this diff.]