Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (311 commits)
  perf tools: Add mode to build without newt support
  perf symbols: symbol inconsistency message should be done only at verbose=1
  perf tui: Add explicit -lslang option
  perf options: Type check all the remaining OPT_ variants
  perf options: Type check OPT_BOOLEAN and fix the offenders
  perf options: Check v type in OPT_U?INTEGER
  perf options: Introduce OPT_UINTEGER
  perf tui: Add workaround for slang < 2.1.4
  perf record: Fix bug mismatch with -c option definition
  perf options: Introduce OPT_U64
  perf tui: Add help window to show key associations
  perf tui: Make <- exit menus too
  perf newt: Add single key shortcuts for zoom into DSO and threads
  perf newt: Exit browser unconditionally when CTRL+C, q or Q is pressed
  perf newt: Fix the 'A'/'a' shortcut for annotate
  perf newt: Make <- exit the ui_browser
  x86, perf: P4 PMU - fix counters management logic
  perf newt: Make <- zoom out filters
  perf report: Report number of events, not samples
  perf hist: Clarify events_stats fields usage
  ...

Fix up trivial conflicts in kernel/fork.c and tools/perf/builtin-record.c
kernel/fork.c
@@ -1112,8 +1112,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->memcg_batch.memcg = NULL;
 #endif
 
-	p->bts = NULL;
-
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
kernel/hw_breakpoint.c (+148, -48)
@@ -40,23 +40,29 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
 
+
 /*
  * Constraints data
  */
 
 /* Number of pinned cpu breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
+static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
 
 /* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]);
+static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
 
 /* Number of non-pinned cpu/task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
+static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
+
+static int nr_slots[TYPE_MAX];
+
+static int constraints_initialized;
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
 struct bp_busy_slots {
@@ -67,16 +73,29 @@ struct bp_busy_slots {
 /* Serialize accesses to the above constraints */
 static DEFINE_MUTEX(nr_bp_mutex);
 
+__weak int hw_breakpoint_weight(struct perf_event *bp)
+{
+	return 1;
+}
+
+static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
+{
+	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
+		return TYPE_DATA;
+
+	return TYPE_INST;
+}
+
 /*
  * Report the maximum number of pinned breakpoints a task
  * have in this cpu
  */
-static unsigned int max_task_bp_pinned(int cpu)
+static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 {
 	int i;
-	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 
-	for (i = HBP_NUM -1; i >= 0; i--) {
+	for (i = nr_slots[type] - 1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
 			return i + 1;
 	}
@@ -84,7 +103,7 @@ static unsigned int max_task_bp_pinned(int cpu)
 	return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk)
+static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
 {
 	struct perf_event_context *ctx = tsk->perf_event_ctxp;
 	struct list_head *list;
@@ -105,7 +124,8 @@ static int task_bp_pinned(struct task_struct *tsk)
 	 */
 	list_for_each_entry(bp, list, event_entry) {
 		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			count++;
+			if (find_slot_idx(bp) == type)
+				count += hw_breakpoint_weight(bp);
 	}
 
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
@@ -118,18 +138,19 @@ static int task_bp_pinned(struct task_struct *tsk)
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
  */
 static void
-fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
+fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
+		    enum bp_type_idx type)
 {
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
 
 	if (cpu >= 0) {
-		slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
+		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
 		if (!tsk)
-			slots->pinned += max_task_bp_pinned(cpu);
+			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(tsk);
-		slots->flexible = per_cpu(nr_bp_flexible, cpu);
+			slots->pinned += task_bp_pinned(tsk, type);
+		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
 	}
@@ -137,48 +158,66 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp)
 	for_each_online_cpu(cpu) {
 		unsigned int nr;
 
-		nr = per_cpu(nr_cpu_bp_pinned, cpu);
+		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
 		if (!tsk)
-			nr += max_task_bp_pinned(cpu);
+			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(tsk);
+			nr += task_bp_pinned(tsk, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
 
-		nr = per_cpu(nr_bp_flexible, cpu);
+		nr = per_cpu(nr_bp_flexible[type], cpu);
 
 		if (nr > slots->flexible)
 			slots->flexible = nr;
 	}
 }
 
 /*
+ * For now, continue to consider flexible as pinned, until we can
+ * ensure no flexible event can ever be scheduled before a pinned event
+ * in a same cpu.
+ */
+static void
+fetch_this_slot(struct bp_busy_slots *slots, int weight)
+{
+	slots->pinned += weight;
+}
+
+/*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
+static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+				enum bp_type_idx type, int weight)
 {
 	unsigned int *tsk_pinned;
-	int count = 0;
+	int old_count = 0;
+	int old_idx = 0;
+	int idx = 0;
 
-	count = task_bp_pinned(tsk);
+	old_count = task_bp_pinned(tsk, type);
+	old_idx = old_count - 1;
+	idx = old_idx + weight;
 
-	tsk_pinned = per_cpu(nr_task_bp_pinned, cpu);
+	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
-		tsk_pinned[count]++;
-		if (count > 0)
-			tsk_pinned[count-1]--;
+		tsk_pinned[idx]++;
+		if (old_count > 0)
+			tsk_pinned[old_idx]--;
 	} else {
-		tsk_pinned[count]--;
-		if (count > 0)
-			tsk_pinned[count-1]++;
+		tsk_pinned[idx]--;
+		if (old_count > 0)
+			tsk_pinned[old_idx]++;
 	}
 }
 
 /*
  * Add/remove the given breakpoint in our constraint table
  */
-static void toggle_bp_slot(struct perf_event *bp, bool enable)
+static void
+toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
+	       int weight)
 {
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
@@ -186,20 +225,20 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 	/* Pinned counter task profiling */
 	if (tsk) {
 		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable);
+			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
 			return;
 		}
 
 		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable);
+			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
 		return;
 	}
 
 	/* Pinned counter cpu profiling */
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
 	else
-		per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
+		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
 }
 
 /*
@@ -246,14 +285,29 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
 static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
+	enum bp_type_idx type;
+	int weight;
 
-	fetch_bp_busy_slots(&slots, bp);
+	/* We couldn't initialize breakpoint constraints on boot */
+	if (!constraints_initialized)
+		return -ENOMEM;
+
+	/* Basic checks */
+	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
+	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)
+		return -EINVAL;
+
+	type = find_slot_idx(bp);
+	weight = hw_breakpoint_weight(bp);
+
+	fetch_bp_busy_slots(&slots, bp, type);
+	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
 		return -ENOSPC;
 
-	toggle_bp_slot(bp, true);
+	toggle_bp_slot(bp, true, type, weight);
 
 	return 0;
 }
@@ -273,7 +327,12 @@ int reserve_bp_slot(struct perf_event *bp)
 
 static void __release_bp_slot(struct perf_event *bp)
 {
-	toggle_bp_slot(bp, false);
+	enum bp_type_idx type;
+	int weight;
+
+	type = find_slot_idx(bp);
+	weight = hw_breakpoint_weight(bp);
+	toggle_bp_slot(bp, false, type, weight);
 }
 
 void release_bp_slot(struct perf_event *bp)
@@ -308,6 +367,28 @@ int dbg_release_bp_slot(struct perf_event *bp)
 	return 0;
 }
 
+static int validate_hw_breakpoint(struct perf_event *bp)
+{
+	int ret;
+
+	ret = arch_validate_hwbkpt_settings(bp);
+	if (ret)
+		return ret;
+
+	if (arch_check_bp_in_kernelspace(bp)) {
+		if (bp->attr.exclude_kernel)
+			return -EINVAL;
+		/*
+		 * Don't let unprivileged users set a breakpoint in the trap
+		 * path to avoid trap recursion attacks.
+		 */
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	}
+
+	return 0;
+}
+
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
 	int ret;
@@ -316,17 +397,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
 	if (ret)
 		return ret;
 
-	/*
-	 * Ptrace breakpoints can be temporary perf events only
-	 * meant to reserve a slot. In this case, it is created disabled and
-	 * we don't want to check the params right now (as we put a null addr)
-	 * But perf tools create events as disabled and we want to check
-	 * the params for them.
-	 * This is a quick hack that will be removed soon, once we remove
-	 * the tmp breakpoints from ptrace
-	 */
-	if (!bp->attr.disabled || !bp->overflow_handler)
-		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+	ret = validate_hw_breakpoint(bp);
 
 	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
 	if (ret)
@@ -373,7 +444,7 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
 	if (attr->disabled)
 		goto end;
 
-	err = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
+	err = validate_hw_breakpoint(bp);
 	if (!err)
 		perf_event_enable(bp);
 
@@ -480,7 +551,36 @@ static struct notifier_block hw_breakpoint_exceptions_nb = {
 
 static int __init init_hw_breakpoint(void)
 {
+	unsigned int **task_bp_pinned;
+	int cpu, err_cpu;
+	int i;
+
+	for (i = 0; i < TYPE_MAX; i++)
+		nr_slots[i] = hw_breakpoint_slots(i);
+
+	for_each_possible_cpu(cpu) {
+		for (i = 0; i < TYPE_MAX; i++) {
+			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
+			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
+						  GFP_KERNEL);
+			if (!*task_bp_pinned)
+				goto err_alloc;
+		}
+	}
+
+	constraints_initialized = 1;
+
 	return register_die_notifier(&hw_breakpoint_exceptions_nb);
+
+err_alloc:
+	for_each_possible_cpu(err_cpu) {
+		if (err_cpu == cpu)
+			break;
+		for (i = 0; i < TYPE_MAX; i++)
+			kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+	}
+
+	return -ENOMEM;
+}
 
 core_initcall(init_hw_breakpoint);
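A note on the constraint bookkeeping above: per cpu and per breakpoint type, tsk_pinned[] is a histogram where tsk_pinned[i] counts how many tasks pin exactly i + 1 slots of that type, so the worst-case task is simply the highest non-empty bucket. The following is a minimal user-space sketch of that same accounting scheme (illustrative names only, not kernel code):

/*
 * Sketch of the per-type slot accounting used by toggle_bp_task_slot()
 * and max_task_bp_pinned() above. tsk_pinned[i] counts tasks pinning
 * exactly i + 1 slots; moving a task from old_count slots to
 * old_count + weight slots shifts one unit between buckets.
 */
#include <stdio.h>

#define NR_SLOTS 4	/* stand-in for nr_slots[type] */

static unsigned int tsk_pinned[NR_SLOTS];

static void toggle_task_slot(int old_count, int weight, int enable)
{
	int old_idx = old_count - 1;
	int idx = old_idx + weight;

	if (enable) {
		tsk_pinned[idx]++;
		if (old_count > 0)
			tsk_pinned[old_idx]--;
	} else {
		tsk_pinned[idx]--;
		if (old_count > 0)
			tsk_pinned[old_idx]++;
	}
}

static int max_task_pinned(void)
{
	/* highest non-empty bucket == worst-case slots pinned by one task */
	for (int i = NR_SLOTS - 1; i >= 0; i--)
		if (tsk_pinned[i] > 0)
			return i + 1;
	return 0;
}

int main(void)
{
	toggle_task_slot(0, 1, 1);	/* a task pins its first slot  */
	toggle_task_slot(1, 1, 1);	/* the same task pins a second */
	printf("max pinned per task: %d\n", max_task_pinned());	/* 2 */
	return 0;
}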
kernel/kprobes.c (+66, -66)
@@ -1588,6 +1588,72 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 	arch_remove_kprobe(p);
 }
 
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+	int ret = 0;
+	struct kprobe *p;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* Check whether specified probe is valid. */
+	p = __get_valid_kprobe(kp);
+	if (unlikely(p == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* If the probe is already disabled (or gone), just return */
+	if (kprobe_disabled(kp))
+		goto out;
+
+	kp->flags |= KPROBE_FLAG_DISABLED;
+	if (p != kp)
+		/* When kp != p, p is always enabled. */
+		try_to_disable_aggr_kprobe(p);
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p))
+		disarm_kprobe(p);
+out:
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+	int ret = 0;
+	struct kprobe *p;
+
+	mutex_lock(&kprobe_mutex);
+
+	/* Check whether specified probe is valid. */
+	p = __get_valid_kprobe(kp);
+	if (unlikely(p == NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (kprobe_gone(kp)) {
+		/* This kprobe has gone, we couldn't enable it. */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (p != kp)
+		kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+		p->flags &= ~KPROBE_FLAG_DISABLED;
+		arm_kprobe(p);
+	}
+out:
+	mutex_unlock(&kprobe_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
 void __kprobes dump_kprobe(struct kprobe *kp)
 {
 	printk(KERN_WARNING "Dumping kprobe:\n");
@@ -1805,72 +1871,6 @@ static const struct file_operations debugfs_kprobes_operations = {
 	.release	= seq_release,
 };
 
-/* Disable one kprobe */
-int __kprobes disable_kprobe(struct kprobe *kp)
-{
-	int ret = 0;
-	struct kprobe *p;
-
-	mutex_lock(&kprobe_mutex);
-
-	/* Check whether specified probe is valid. */
-	p = __get_valid_kprobe(kp);
-	if (unlikely(p == NULL)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	/* If the probe is already disabled (or gone), just return */
-	if (kprobe_disabled(kp))
-		goto out;
-
-	kp->flags |= KPROBE_FLAG_DISABLED;
-	if (p != kp)
-		/* When kp != p, p is always enabled. */
-		try_to_disable_aggr_kprobe(p);
-
-	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		disarm_kprobe(p);
-out:
-	mutex_unlock(&kprobe_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(disable_kprobe);
-
-/* Enable one kprobe */
-int __kprobes enable_kprobe(struct kprobe *kp)
-{
-	int ret = 0;
-	struct kprobe *p;
-
-	mutex_lock(&kprobe_mutex);
-
-	/* Check whether specified probe is valid. */
-	p = __get_valid_kprobe(kp);
-	if (unlikely(p == NULL)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (kprobe_gone(kp)) {
-		/* This kprobe has gone, we couldn't enable it. */
-		ret = -EINVAL;
-		goto out;
-	}
-
-	if (p != kp)
-		kp->flags &= ~KPROBE_FLAG_DISABLED;
-
-	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
-		p->flags &= ~KPROBE_FLAG_DISABLED;
-		arm_kprobe(p);
-	}
-out:
-	mutex_unlock(&kprobe_mutex);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(enable_kprobe);
-
 static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
 
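The disable_kprobe()/enable_kprobe() pair moved above is exported with EXPORT_SYMBOL_GPL, so a module can park a registered probe and re-arm it later without unregistering. A minimal module sketch under that assumption (the probed symbol and handler are illustrative, not part of this commit):

/* Toggling a kprobe with the APIs shown above (illustrative module). */
#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* example target of the era */
	.pre_handler = demo_pre_handler,
};

static int __init demo_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;
	disable_kprobe(&kp);	/* keep it registered but dormant */
	enable_kprobe(&kp);	/* re-arm when needed */
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");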
kernel/lockdep.c (+2, -2)
@@ -3232,7 +3232,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 	raw_local_irq_save(flags);
 	check_flags(flags);
 	current->lockdep_recursion = 1;
-	trace_lock_release(lock, nested, ip);
+	trace_lock_release(lock, ip);
 	__lock_release(lock, nested, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
@@ -3385,7 +3385,7 @@ found_it:
 		hlock->holdtime_stamp = now;
 	}
 
-	trace_lock_acquired(lock, ip, waittime);
+	trace_lock_acquired(lock, ip);
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
(+275, -104) File diff suppressed because it is too large
kernel/ptrace.c
@@ -75,7 +75,6 @@ void __ptrace_unlink(struct task_struct *child)
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
 
-	arch_ptrace_untrace(child);
 	if (task_is_traced(child))
 		ptrace_untrace(child);
 }
 
kernel/sched.c
@@ -2087,49 +2087,6 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	return 1;
 }
 
-/*
- * wait_task_context_switch -	wait for a thread to complete at least one
- *				context switch.
- *
- * @p must not be current.
- */
-void wait_task_context_switch(struct task_struct *p)
-{
-	unsigned long nvcsw, nivcsw, flags;
-	int running;
-	struct rq *rq;
-
-	nvcsw	= p->nvcsw;
-	nivcsw	= p->nivcsw;
-	for (;;) {
-		/*
-		 * The runqueue is assigned before the actual context
-		 * switch. We need to take the runqueue lock.
-		 *
-		 * We could check initially without the lock but it is
-		 * very likely that we need to take the lock in every
-		 * iteration.
-		 */
-		rq = task_rq_lock(p, &flags);
-		running = task_running(rq, p);
-		task_rq_unlock(rq, &flags);
-
-		if (likely(!running))
-			break;
-		/*
-		 * The switch count is incremented before the actual
-		 * context switch. We thus wait for two switches to be
-		 * sure at least one completed.
-		 */
-		if ((p->nvcsw - nvcsw) > 1)
-			break;
-		if ((p->nivcsw - nivcsw) > 1)
-			break;
-
-		cpu_relax();
-	}
-}
-
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
kernel/trace/Kconfig
@@ -44,9 +44,6 @@ config HAVE_FTRACE_MCOUNT_RECORD
	help
	  See Documentation/trace/ftrace-design.txt
 
-config HAVE_HW_BRANCH_TRACER
-	bool
-
 config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
@@ -374,14 +371,6 @@ config STACK_TRACER
 
	  Say N if unsure.
 
-config HW_BRANCH_TRACER
-	depends on HAVE_HW_BRANCH_TRACER
-	bool "Trace hw branches"
-	select GENERIC_TRACER
-	help
-	  This tracer records all branches on the system in a circular
-	  buffer, giving access to the last N branches for each cpu.
-
 config KMEMTRACE
	bool "Trace SLAB allocations"
	select GENERIC_TRACER
kernel/trace/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
 
kernel/trace/trace.h (+2, -18)
@@ -34,7 +34,6 @@ enum trace_type {
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
-	TRACE_HW_BRANCHES,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_BLK,
@@ -103,29 +102,17 @@ struct syscall_trace_exit {
	long			ret;
 };
 
-struct kprobe_trace_entry {
+struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
-	int			nargs;
-	unsigned long		args[];
 };
 
-#define SIZEOF_KPROBE_TRACE_ENTRY(n)			\
-	(offsetof(struct kprobe_trace_entry, args) +	\
-	(sizeof(unsigned long) * (n)))
-
-struct kretprobe_trace_entry {
+struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
-	int			nargs;
-	unsigned long		args[];
 };
 
-#define SIZEOF_KRETPROBE_TRACE_ENTRY(n)			\
-	(offsetof(struct kretprobe_trace_entry, args) +	\
-	(sizeof(unsigned long) * (n)))
-
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
@@ -229,7 +216,6 @@ extern void __ftrace_bad_type(void);
			  TRACE_GRAPH_ENT);		\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);		\
-		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);	\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
@@ -467,8 +453,6 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace,
					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
-extern int trace_selftest_startup_hw_branches(struct tracer *trace,
-					      struct trace_array *tr);
 extern int trace_selftest_startup_ksym(struct tracer *trace,
					 struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
kernel/trace/trace_entries.h
@@ -318,18 +318,6 @@ FTRACE_ENTRY(branch, trace_branch,
		 __entry->func, __entry->file, __entry->correct)
 );
 
-FTRACE_ENTRY(hw_branch, hw_branch_entry,
-
-	TRACE_HW_BRANCHES,
-
-	F_STRUCT(
-		__field(	u64,	from	)
-		__field(	u64,	to	)
-	),
-
-	F_printk("from: %llx to: %llx", __entry->from, __entry->to)
-);
-
 FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry,
 
	TRACE_KMEM_ALLOC,
 
kernel/trace/trace_events_filter.c
@@ -1398,7 +1398,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
	}
 
	err = -EINVAL;
-	if (!call)
+	if (&call->list == &ftrace_events)
		goto out_unlock;
 
	err = -EEXIST;
 
kernel/trace/trace_hw_branches.c (deleted)
@@ -1,312 +0,0 @@
-/*
- * h/w branch tracer for x86 based on BTS
- *
- * Copyright (C) 2008-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- */
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/ftrace.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#include <linux/fs.h>
-
-#include <asm/ds.h>
-
-#include "trace_output.h"
-#include "trace.h"
-
-
-#define BTS_BUFFER_SIZE (1 << 13)
-
-static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
-
-#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
-
-static int trace_hw_branches_enabled __read_mostly;
-static int trace_hw_branches_suspended __read_mostly;
-static struct trace_array *hw_branch_trace __read_mostly;
-
-
-static void bts_trace_init_cpu(int cpu)
-{
-	per_cpu(hwb_tracer, cpu) =
-		ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
-				   BTS_BUFFER_SIZE, NULL, (size_t)-1,
-				   BTS_KERNEL);
-
-	if (IS_ERR(per_cpu(hwb_tracer, cpu)))
-		per_cpu(hwb_tracer, cpu) = NULL;
-}
-
-static int bts_trace_init(struct trace_array *tr)
-{
-	int cpu;
-
-	hw_branch_trace = tr;
-	trace_hw_branches_enabled = 0;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		bts_trace_init_cpu(cpu);
-
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			trace_hw_branches_enabled = 1;
-	}
-	trace_hw_branches_suspended = 0;
-	put_online_cpus();
-
-	/* If we could not enable tracing on a single cpu, we fail. */
-	return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		if (likely(per_cpu(hwb_tracer, cpu))) {
-			ds_release_bts(per_cpu(hwb_tracer, cpu));
-			per_cpu(hwb_tracer, cpu) = NULL;
-		}
-	}
-	trace_hw_branches_enabled = 0;
-	trace_hw_branches_suspended = 0;
-	put_online_cpus();
-}
-
-static void bts_trace_start(struct trace_array *tr)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_resume_bts(per_cpu(hwb_tracer, cpu));
-	trace_hw_branches_suspended = 0;
-	put_online_cpus();
-}
-
-static void bts_trace_stop(struct trace_array *tr)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
-	trace_hw_branches_suspended = 1;
-	put_online_cpus();
-}
-
-static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		/* The notification is sent with interrupts enabled. */
-		if (trace_hw_branches_enabled) {
-			bts_trace_init_cpu(cpu);
-
-			if (trace_hw_branches_suspended &&
-			    likely(per_cpu(hwb_tracer, cpu)))
-				ds_suspend_bts(per_cpu(hwb_tracer, cpu));
-		}
-		break;
-
-	case CPU_DOWN_PREPARE:
-		/* The notification is sent with interrupts enabled. */
-		if (likely(per_cpu(hwb_tracer, cpu))) {
-			ds_release_bts(per_cpu(hwb_tracer, cpu));
-			per_cpu(hwb_tracer, cpu) = NULL;
-		}
-	}
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
-	.notifier_call = bts_hotcpu_handler
-};
-
-static void bts_trace_print_header(struct seq_file *m)
-{
-	seq_puts(m, "# CPU# TO <- FROM\n");
-}
-
-static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
-{
-	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
-	struct trace_entry *entry = iter->ent;
-	struct trace_seq *seq = &iter->seq;
-	struct hw_branch_entry *it;
-
-	trace_assign_type(it, entry);
-
-	if (entry->type == TRACE_HW_BRANCHES) {
-		if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
-		    seq_print_ip_sym(seq, it->to, symflags) &&
-		    trace_seq_printf(seq, "\t <- ") &&
-		    seq_print_ip_sym(seq, it->from, symflags) &&
-		    trace_seq_printf(seq, "\n"))
-			return TRACE_TYPE_HANDLED;
-		return TRACE_TYPE_PARTIAL_LINE;
-	}
-	return TRACE_TYPE_UNHANDLED;
-}
-
-void trace_hw_branch(u64 from, u64 to)
-{
-	struct ftrace_event_call *call = &event_hw_branch;
-	struct trace_array *tr = hw_branch_trace;
-	struct ring_buffer_event *event;
-	struct ring_buffer *buf;
-	struct hw_branch_entry *entry;
-	unsigned long irq1;
-	int cpu;
-
-	if (unlikely(!tr))
-		return;
-
-	if (unlikely(!trace_hw_branches_enabled))
-		return;
-
-	local_irq_save(irq1);
-	cpu = raw_smp_processor_id();
-	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
-		goto out;
-
-	buf = tr->buffer;
-	event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
-					  sizeof(*entry), 0, 0);
-	if (!event)
-		goto out;
-	entry = ring_buffer_event_data(event);
-
-	tracing_generic_entry_update(&entry->ent, 0, from);
-	entry->ent.type = TRACE_HW_BRANCHES;
-	entry->from = from;
-	entry->to = to;
-	if (!filter_check_discard(call, entry, buf, event))
-		trace_buffer_unlock_commit(buf, event, 0, 0);
-
- out:
-	atomic_dec(&tr->data[cpu]->disabled);
-	local_irq_restore(irq1);
-}
-
-static void trace_bts_at(const struct bts_trace *trace, void *at)
-{
-	struct bts_struct bts;
-	int err = 0;
-
-	WARN_ON_ONCE(!trace->read);
-	if (!trace->read)
-		return;
-
-	err = trace->read(this_tracer, at, &bts);
-	if (err < 0)
-		return;
-
-	switch (bts.qualifier) {
-	case BTS_BRANCH:
-		trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
-		break;
-	}
-}
-
-/*
- * Collect the trace on the current cpu and write it into the ftrace buffer.
- *
- * pre: tracing must be suspended on the current cpu
- */
-static void trace_bts_cpu(void *arg)
-{
-	struct trace_array *tr = (struct trace_array *)arg;
-	const struct bts_trace *trace;
-	unsigned char *at;
-
-	if (unlikely(!tr))
-		return;
-
-	if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
-		return;
-
-	if (unlikely(!this_tracer))
-		return;
-
-	trace = ds_read_bts(this_tracer);
-	if (!trace)
-		return;
-
-	for (at = trace->ds.top; (void *)at < trace->ds.end;
-	     at += trace->ds.size)
-		trace_bts_at(trace, at);
-
-	for (at = trace->ds.begin; (void *)at < trace->ds.top;
-	     at += trace->ds.size)
-		trace_bts_at(trace, at);
-}
-
-static void trace_bts_prepare(struct trace_iterator *iter)
-{
-	int cpu;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_suspend_bts(per_cpu(hwb_tracer, cpu));
-	/*
-	 * We need to collect the trace on the respective cpu since ftrace
-	 * implicitly adds the record for the current cpu.
-	 * Once that is more flexible, we could collect the data from any cpu.
-	 */
-	on_each_cpu(trace_bts_cpu, iter->tr, 1);
-
-	for_each_online_cpu(cpu)
-		if (likely(per_cpu(hwb_tracer, cpu)))
-			ds_resume_bts(per_cpu(hwb_tracer, cpu));
-	put_online_cpus();
-}
-
-static void trace_bts_close(struct trace_iterator *iter)
-{
-	tracing_reset_online_cpus(iter->tr);
-}
-
-void trace_hw_branch_oops(void)
-{
-	if (this_tracer) {
-		ds_suspend_bts_noirq(this_tracer);
-		trace_bts_cpu(hw_branch_trace);
-		ds_resume_bts_noirq(this_tracer);
-	}
-}
-
-struct tracer bts_tracer __read_mostly =
-{
-	.name		= "hw-branch-tracer",
-	.init		= bts_trace_init,
-	.reset		= bts_trace_reset,
-	.print_header	= bts_trace_print_header,
-	.print_line	= bts_trace_print_line,
-	.start		= bts_trace_start,
-	.stop		= bts_trace_stop,
-	.open		= trace_bts_prepare,
-	.close		= trace_bts_close,
-#ifdef CONFIG_FTRACE_SELFTEST
-	.selftest	= trace_selftest_startup_hw_branches,
-#endif /* CONFIG_FTRACE_SELFTEST */
-};
-
-__init static int init_bts_trace(void)
-{
-	register_hotcpu_notifier(&bts_hotcpu_notifier);
-	return register_tracer(&bts_tracer);
-}
-device_initcall(init_bts_trace);
(+327, -204) File diff suppressed because it is too large
kernel/trace/trace_ksym.c
@@ -34,12 +34,6 @@
 
 #include <asm/atomic.h>
 
-/*
- * For now, let us restrict the no. of symbols traced simultaneously to number
- * of available hardware breakpoint registers.
- */
-#define KSYM_TRACER_MAX HBP_NUM
-
 #define KSYM_TRACER_OP_LEN 3  /* rw- */
 
 struct trace_ksym {
@@ -53,7 +47,6 @@ struct trace_ksym {
 
 static struct trace_array *ksym_trace_array;
 
-static unsigned int ksym_filter_entry_count;
 static unsigned int ksym_tracing_enabled;
 
 static HLIST_HEAD(ksym_filter_head);
@@ -181,13 +174,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
	struct trace_ksym *entry;
	int ret = -ENOMEM;
 
-	if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
-		printk(KERN_ERR "ksym_tracer: Maximum limit:(%d) reached. No"
-		" new requests for tracing can be accepted now.\n",
-			KSYM_TRACER_MAX);
-		return -ENOSPC;
-	}
-
	entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
@@ -203,13 +189,17 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
 
	if (IS_ERR(entry->ksym_hbp)) {
		ret = PTR_ERR(entry->ksym_hbp);
-		printk(KERN_INFO "ksym_tracer request failed. Try again"
-		" later!!\n");
+		if (ret == -ENOSPC) {
+			printk(KERN_ERR "ksym_tracer: Maximum limit reached."
+			" No new requests for tracing can be accepted now.\n");
+		} else {
+			printk(KERN_INFO "ksym_tracer request failed. Try again"
+			" later!!\n");
+		}
		goto err;
	}
 
	hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
-	ksym_filter_entry_count++;
 
	return 0;
@@ -265,7 +255,6 @@ static void __ksym_trace_reset(void)
	hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
								ksym_hlist) {
		unregister_wide_hw_breakpoint(entry->ksym_hbp);
-		ksym_filter_entry_count--;
		hlist_del_rcu(&(entry->ksym_hlist));
		synchronize_rcu();
		kfree(entry);
@@ -338,7 +327,6 @@ static ssize_t ksym_trace_filter_write(struct file *file,
		goto out_unlock;
	}
	/* Error or "symbol:---" case: drop it */
-	ksym_filter_entry_count--;
	hlist_del_rcu(&(entry->ksym_hlist));
	synchronize_rcu();
	kfree(entry);
 
kernel/trace/trace_selftest.c
@@ -17,7 +17,6 @@ static inline int trace_valid_entry(struct trace_entry *entry)
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
-	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
@@ -755,62 +754,6 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 }
 #endif /* CONFIG_BRANCH_TRACER */
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-int
-trace_selftest_startup_hw_branches(struct tracer *trace,
-				   struct trace_array *tr)
-{
-	struct trace_iterator *iter;
-	struct tracer tracer;
-	unsigned long count;
-	int ret;
-
-	if (!trace->open) {
-		printk(KERN_CONT "missing open function...");
-		return -1;
-	}
-
-	ret = tracer_init(trace, tr);
-	if (ret) {
-		warn_failed_init_tracer(trace, ret);
-		return ret;
-	}
-
-	/*
-	 * The hw-branch tracer needs to collect the trace from the various
-	 * cpu trace buffers - before tracing is stopped.
-	 */
-	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
-	if (!iter)
-		return -ENOMEM;
-
-	memcpy(&tracer, trace, sizeof(tracer));
-
-	iter->trace = &tracer;
-	iter->tr = tr;
-	iter->pos = -1;
-	mutex_init(&iter->mutex);
-
-	trace->open(iter);
-
-	mutex_destroy(&iter->mutex);
-	kfree(iter);
-
-	tracing_stop();
-
-	ret = trace_test_buffer(tr, &count);
-	trace->reset(tr);
-	tracing_start();
-
-	if (!ret && !count) {
-		printk(KERN_CONT "no entries found..");
-		ret = -1;
-	}
-
-	return ret;
-}
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_KSYM_TRACER
 static int ksym_selftest_dummy;