Mirror of https://github.com/armbian/linux-cix.git (synced 2026-01-06 12:30:45 -08:00)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (123 commits)
perf: Remove the nmi parameter from the oprofile_perf backend
x86, perf: Make copy_from_user_nmi() a library function
perf: Remove perf_event_attr::type check
x86, perf: P4 PMU - Fix typos in comments and style cleanup
perf tools: Make test use the preset debugfs path
perf tools: Add automated tests for events parsing
perf tools: De-opt the parse_events function
perf script: Fix display of IP address for non-callchain path
perf tools: Fix endian conversion reading event attr from file header
perf tools: Add missing 'node' alias to the hw_cache[] array
perf probe: Support adding probes on offline kernel modules
perf probe: Add probed module in front of function
perf probe: Introduce debuginfo to encapsulate dwarf information
perf-probe: Move dwarf library routines to dwarf-aux.{c, h}
perf probe: Remove redundant dwarf functions
perf probe: Move strtailcmp to string.c
perf probe: Rename DIE_FIND_CB_FOUND to DIE_FIND_CB_END
tracing/kprobe: Update symbol reference when loading module
tracing/kprobes: Support module init function probing
kprobes: Return -ENOENT if probe point doesn't exist
...
@@ -22,14 +22,15 @@ current_tracer. Instead of that, add probe points via
 
 Synopsis of kprobe_events
 -------------------------
-  p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS]	: Set a probe
-  r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS]		: Set a return probe
+  p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS]	: Set a probe
+  r[:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS]		: Set a return probe
   -:[GRP/]EVENT						: Clear a probe
 
  GRP		: Group name. If omitted, use "kprobes" for it.
  EVENT		: Event name. If omitted, the event name is generated
-		  based on SYMBOL+offs or MEMADDR.
- SYMBOL[+offs]	: Symbol+offset where the probe is inserted.
+		  based on SYM+offs or MEMADDR.
+ MOD		: Module name which has given SYM.
+ SYM[+offs]	: Symbol+offset where the probe is inserted.
  MEMADDR	: Address where the probe is inserted.
 
  FETCHARGS	: Arguments. Each probe can have up to 128 args.
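For context, the new [MOD:]SYM form can be exercised from userspace by writing a probe definition into the kprobe_events control file. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and using made-up probe, module, and symbol names:

/* Sketch: register a kprobe on a module symbol via the new [MOD:]SYM
 * syntax. "myprobe", "ext4" and "ext4_sync_file" are illustrative. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "w");

	if (!f)
		return 1;
	fprintf(f, "p:myprobe ext4:ext4_sync_file\n");
	fclose(f);
	return 0;
}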
Makefile (1 line changed)
@@ -1290,6 +1290,7 @@ help:
 	@echo  '  make O=dir [targets] Locate all output files in "dir", including .config'
 	@echo  '  make C=1 [targets] Check all c source with $$CHECK (sparse by default)'
 	@echo  '  make C=2 [targets] Force check of all c source with $$CHECK'
+	@echo  '  make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 	@echo  '  make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
 	@echo  '		1: warnings which may be relevant and do not occur too often'
 	@echo  '		2: warnings which occur quite often but may still be relevant'
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	data.period = event->hw.last_period;
 
 	if (alpha_perf_event_set_period(event, hwc, idx)) {
-		if (perf_event_overflow(event, 1, &data, regs)) {
+		if (perf_event_overflow(event, &data, regs)) {
 			/* Interrupts coming too quickly; "throttle" the
 			 * counter, i.e., disable it for a little while.
 			 */
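This is the first of many call sites below that drop the nmi flag from perf_event_overflow(). The signature change, inferred from these hunks:

/* before this series */
int perf_event_overflow(struct perf_event *event, int nmi,
			struct perf_sample_data *data, struct pt_regs *regs);

/* after: the nmi argument is gone; NMI safety is handled inside perf */
int perf_event_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs);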
@@ -91,7 +91,7 @@ DEFINE_PER_CPU(u8, irq_work_pending);
 #define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
 #define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0
 
-void set_irq_work_pending(void)
+void arch_irq_work_raise(void)
 {
 	set_irq_work_pending_flag();
 }
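The rename makes this hook match the name the generic irq_work layer actually calls. A hedged sketch of the consumer side of that API as it existed around this series (callback and names are illustrative):

#include <linux/irq_work.h>

/* Runs in hard-IRQ context shortly after being queued. */
static void my_deferred_func(struct irq_work *work)
{
}

static struct irq_work my_work = {
	.func = my_deferred_func,
};

/* Callable from NMI or other restricted contexts. */
static void poke(void)
{
	irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */
}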
@@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 enum armv6mpcore_perf_types {
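These hunks add the new C(NODE) row (PERF_COUNT_HW_CACHE_NODE, for memory-node accesses) to each PMU's cache map, marked unsupported here. For reference, userspace selects a cache event by packing id, op, and result into perf_event_attr.config; a sketch of a node-read request against the generic perf ABI:

#include <linux/perf_event.h>
#include <string.h>

static void fill_node_read_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size   = sizeof(*attr);
	attr->type   = PERF_TYPE_HW_CACHE;
	attr->config = PERF_COUNT_HW_CACHE_NODE |
		       (PERF_COUNT_HW_CACHE_OP_READ       << 8) |
		       (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16);
	/* On the PMUs below the map entry is CACHE_OP_UNSUPPORTED, so
	 * event creation with this config will be rejected. */
}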
@@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 static inline unsigned long
@@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
@@ -255,6 +255,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -371,6 +385,20 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 /*
@@ -787,7 +815,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
@@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
 };
 
 #define XSCALE_PMU_ENABLE 0x001
@@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
@@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;
 
-		if (perf_event_overflow(event, 0, &data, regs))
+		if (perf_event_overflow(event, &data, regs))
 			armpmu->disable(hwc, idx);
 	}
 
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
 /*
  * Handle hitting a HW-breakpoint.
  */
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
@@ -479,7 +479,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
 	attr.bp_type	= type;
 	attr.disabled	= 1;
 
-	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
+	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
+					   tsk);
 }
 
 static int ptrace_gethbpregs(struct task_struct *tsk, long num,
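register_user_hw_breakpoint() grows a context pointer and the triggered callback loses its unused int, in line with the overflow-handler change above. A hedged sketch of the new calling convention (handler and function names are illustrative):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static void my_bp_handler(struct perf_event *bp,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	/* react to the breakpoint hit */
}

static struct perf_event *install_bp(struct perf_event_attr *attr,
				     struct task_struct *tsk)
{
	/* the third argument is the new opaque context pointer */
	return register_user_hw_breakpoint(attr, my_bp_handler, NULL, tsk);
}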
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
 	unsigned int address, destreg, data, type;
 	unsigned int res = 0;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
 	if (current->pid != previous_pid) {
 		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	fault = __do_page_fault(mm, addr, fsr, tsk);
 	up_read(&mm->mmap_sem);
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 	if (fault & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
 	else if (fault & VM_FAULT_MINOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
 
 	/*
 	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
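The software-event helper changes the same way across every architecture touched below; the prototype, inferred from these call sites:

/* before */
void perf_sw_event(u32 event_id, u64 nr, int nmi,
		   struct pt_regs *regs, u64 addr);

/* after */
void perf_sw_event(u32 event_id, u64 nr,
		   struct pt_regs *regs, u64 addr);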
@@ -7,6 +7,10 @@
 extern int raw_show_trace;
 extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 				  unsigned long pc, unsigned long *ra);
+extern unsigned long unwind_stack_by_address(unsigned long stack_page,
+					     unsigned long *sp,
+					     unsigned long pc,
+					     unsigned long *ra);
 #else
 #define raw_show_trace 1
 static inline unsigned long unwind_stack(struct task_struct *task,
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 	if (!mipspmu_event_set_period(event, hwc, idx))
 		return;
 
-	if (perf_event_overflow(event, 0, data, regs))
+	if (perf_event_overflow(event, data, regs))
 		mipspmu->disable_event(idx);
 }
 
@@ -377,6 +377,20 @@ static const struct mips_perf_event mipsxxcore_cache_map
 			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		},
+	},
 };
 
 /* 74K core has completely different cache event map. */
@@ -480,6 +494,20 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 		},
 	},
+	[C(NODE)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+			[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
+		},
+	},
 };
 
 #ifdef CONFIG_MIPS_MT_SMP
@@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 
 
 #ifdef CONFIG_KALLSYMS
-/* used by show_backtrace() */
-unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
-			   unsigned long pc, unsigned long *ra)
+/* generic stack unwinding function */
+unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+					      unsigned long *sp,
+					      unsigned long pc,
+					      unsigned long *ra)
 {
-	unsigned long stack_page;
 	struct mips_frame_info info;
 	unsigned long size, ofs;
 	int leaf;
 	extern void ret_from_irq(void);
 	extern void ret_from_exception(void);
 
-	stack_page = (unsigned long)task_stack_page(task);
 	if (!stack_page)
 		return 0;
 
@@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 	*ra = 0;
 	return __kernel_text_address(pc) ? pc : 0;
 }
+EXPORT_SYMBOL(unwind_stack_by_address);
+
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+			   unsigned long pc, unsigned long *ra)
+{
+	unsigned long stack_page = (unsigned long)task_stack_page(task);
+	return unwind_stack_by_address(stack_page, sp, pc, ra);
+}
 #endif
 
 /*
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
 	if ((opcode & OPCODE) == LL) {
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-			      1, 0, regs, 0);
+			      1, regs, 0);
 		return simulate_ll(regs, opcode);
 	}
 	if ((opcode & OPCODE) == SC) {
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-			      1, 0, regs, 0);
+			      1, regs, 0);
 		return simulate_sc(regs, opcode);
 	}
 
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 	int rd = (opcode & RD) >> 11;
 	int rt = (opcode & RT) >> 16;
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-		      1, 0, regs, 0);
+		      1, regs, 0);
 	switch (rd) {
 	case 0:		/* CPU number */
 		regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
 	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-			      1, 0, regs, 0);
+			      1, regs, 0);
 		return 0;
 	}
 
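The emulation paths above feed the PERF_COUNT_SW_EMULATION_FAULTS counter. A self-contained sketch of reading that counter from userspace with perf_event_open(2), which has no glibc wrapper (error handling kept minimal):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_EMULATION_FAULTS;

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0)
		return 1;
	/* ... run the workload that triggers emulation here ... */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("emulation faults: %lld\n", count);
	close(fd);
	return 0;
}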
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	unsigned long value;
 	unsigned int res;
 
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-		      1, 0, regs, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
 	/*
 	 * This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	mm_segment_t seg;
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
-		      1, 0, regs, regs->cp0_badvaddr);
+		      1, regs, regs->cp0_badvaddr);
 	/*
 	 * Did we catch a fault trying to load an instruction?
 	 * Or are we running in MIPS16 mode?
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
 	}
 
       emul:
-	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-		      1, 0, xcp, 0);
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
 	MIPS_FPU_EMU_INC_STATS(emulated);
 	switch (MIPSInst_OPCODE(ir)) {
 	case ldc1_op:{
@@ -145,7 +145,7 @@ good_area:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
 		BUG();
 	}
 	if (fault & VM_FAULT_MAJOR) {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-			      1, 0, regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
 		tsk->maj_flt++;
 	} else {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-			      1, 0, regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 		tsk->min_flt++;
 	}
 
@@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y := $(DRIVER_OBJS) common.o
+oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
 
 oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
 oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
arch/mips/oprofile/backtrace.c (new file, 175 lines)
@@ -0,0 +1,175 @@
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <linux/stacktrace.h>
#include <linux/kernel.h>
#include <asm/sections.h>
#include <asm/inst.h>

struct stackframe {
	unsigned long sp;
	unsigned long pc;
	unsigned long ra;
};

static inline int get_mem(unsigned long addr, unsigned long *result)
{
	unsigned long *address = (unsigned long *) addr;
	if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
		return -1;
	if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
		return -3;
	return 0;
}

/*
 * These two instruction helpers were taken from process.c
 */
static inline int is_ra_save_ins(union mips_instruction *ip)
{
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
		&& ip->i_format.rs == 29 && ip->i_format.rt == 31;
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
	return 0;
}

/*
 * Looks for specific instructions that mark the end of a function.
 * This usually means we ran into the code area of the previous function.
 */
static inline int is_end_of_function_marker(union mips_instruction *ip)
{
	/* jr ra */
	if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
		return 1;
	/* lui gp */
	if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
		return 1;
	return 0;
}

/*
 * TODO for userspace stack unwinding:
 * - handle cases where the stack is adjusted inside a function
 *   (generally doesn't happen)
 * - find optimal value for max_instr_check
 * - try to find a way to handle leaf functions
 */

static inline int unwind_user_frame(struct stackframe *old_frame,
				    const unsigned int max_instr_check)
{
	struct stackframe new_frame = *old_frame;
	off_t ra_offset = 0;
	size_t stack_size = 0;
	unsigned long addr;

	if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
		return -9;

	for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
		&& (!ra_offset || !stack_size); --addr) {
		union mips_instruction ip;

		if (get_mem(addr, (unsigned long *) &ip))
			return -11;

		if (is_sp_move_ins(&ip)) {
			int stack_adjustment = ip.i_format.simmediate;
			if (stack_adjustment > 0)
				/* This marks the end of the previous function,
				   which means we overran. */
				break;
			stack_size = (unsigned) stack_adjustment;
		} else if (is_ra_save_ins(&ip)) {
			int ra_slot = ip.i_format.simmediate;
			if (ra_slot < 0)
				/* This shouldn't happen. */
				break;
			ra_offset = ra_slot;
		} else if (is_end_of_function_marker(&ip))
			break;
	}

	if (!ra_offset || !stack_size)
		return -1;

	if (ra_offset) {
		new_frame.ra = old_frame->sp + ra_offset;
		if (get_mem(new_frame.ra, &(new_frame.ra)))
			return -13;
	}

	if (stack_size) {
		new_frame.sp = old_frame->sp + stack_size;
		if (get_mem(new_frame.sp, &(new_frame.sp)))
			return -14;
	}

	if (new_frame.sp > old_frame->sp)
		return -2;

	new_frame.pc = old_frame->ra;
	*old_frame = new_frame;

	return 0;
}

static inline void do_user_backtrace(unsigned long low_addr,
				     struct stackframe *frame,
				     unsigned int depth)
{
	const unsigned int max_instr_check = 512;
	const unsigned long high_addr = low_addr + THREAD_SIZE;

	while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
		oprofile_add_trace(frame->ra);
		if (frame->sp < low_addr || frame->sp > high_addr)
			break;
	}
}

#ifndef CONFIG_KALLSYMS
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth) { }
#else
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth)
{
	while (depth-- && frame->pc) {
		frame->pc = unwind_stack_by_address(low_addr,
						    &(frame->sp),
						    frame->pc,
						    &(frame->ra));
		oprofile_add_trace(frame->ra);
	}
}
#endif

void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
{
	struct stackframe frame = { .sp = regs->regs[29],
				    .pc = regs->cp0_epc,
				    .ra = regs->regs[31] };
	const int userspace = user_mode(regs);
	const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);

	if (userspace)
		do_user_backtrace(low_addr, &frame, depth);
	else
		do_kernel_backtrace(low_addr, &frame, depth);
}
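op_mips_backtrace() is the architecture's callchain hook for oprofile. A hedged sketch of how it is presumably wired up in arch/mips/oprofile/common.c (that file's change is not shown in this truncated diff):

#include <linux/oprofile.h>
#include <linux/init.h>

extern void op_mips_backtrace(struct pt_regs *const regs, unsigned int depth);

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	/* ... existing PMU model setup elided ... */
	ops->backtrace = op_mips_backtrace;
	return 0;
}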
Some files were not shown because too many files have changed in this diff.