Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf changes from Ingo Molnar:
"Lots of changes:
- (much) improved assembly annotation support in perf report, with
jump visualization, searching, navigation, visual output
improvements and more.
- kernel support for AMD IBS PMU hardware features. Notably 'perf
record -e cycles:p' and 'perf top -e cycles:p' should work without
skid now, like PEBS does on the Intel side, because it takes
advantage of IBS transparently.
- the libtracevents library: it is the first step towards unifying
tracing tooling and perf, and it also gives a tracing library for
external tools like powertop to rely on.
- infrastructure: various improvements and refactoring of the UI
modules and related code
- infrastructure: cleanup and simplification of the profiling
targets code (--uid, --pid, --tid, --cpu, --all-cpus, etc.)
- tons of robustness fixes all around
- various ftrace updates: speedups, cleanups, robustness
improvements.
- typing 'make' in tools/ will now give you a menu of projects to
build and a short help text to explain what each does.
- ... and lots of other changes I forgot to list.
The perf record make bzImage + perf report regression you reported
should be fixed."
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (166 commits)
tracing: Remove kernel_lock annotations
tracing: Fix initial buffer_size_kb state
ring-buffer: Merge separate resize loops
perf evsel: Create events initially disabled -- again
perf tools: Split term type into value type and term type
perf hists: Fix callchain ip printf format
perf target: Add uses_mmap field
ftrace: Remove selecting FRAME_POINTER with FUNCTION_TRACER
ftrace/x86: Have x86 ftrace use the ftrace_modify_all_code()
ftrace: Make ftrace_modify_all_code() global for archs to use
ftrace: Return record ip addr for ftrace_location()
ftrace: Consolidate ftrace_location() and ftrace_text_reserved()
ftrace: Speed up search by skipping pages by address
ftrace: Remove extra helper functions
ftrace: Sort all function addresses, not just per page
tracing: change CPU ring buffer state from tracing_cpumask
tracing: Check return value of tracing_dentry_percpu()
ring-buffer: Reset head page before running self test
ring-buffer: Add integrity check at end of iter read
ring-buffer: Make addition of pages in ring buffer atomic
...
@@ -1471,6 +1471,13 @@ kernelrelease:
 kernelversion:
 	@echo $(KERNELVERSION)

+# Clear a bunch of variables before executing the submake
+tools/: FORCE
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/
+
+tools/%: FORCE
+	$(Q)$(MAKE) LDFLAGS= MAKEFLAGS= -C $(src)/tools/ $*
+
 # Single targets
 # ---------------------------------------------------------------------------
 # Single targets are compatible with:

@@ -824,7 +824,6 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,

 	idx = la_ptr;

-	perf_sample_data_init(&data, 0);
 	for (j = 0; j < cpuc->n_events; j++) {
 		if (cpuc->current_idx[j] == idx)
 			break;
@@ -848,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,

 	hwc = &event->hw;
 	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, hwc->last_period);

 	if (alpha_perf_event_set_period(event, hwc, idx)) {
 		if (perf_event_overflow(event, &data, regs)) {

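The change that repeats through almost every architecture hunk below (ARM, MIPS, PowerPC, SPARC, x86) is the same one made here for alpha: perf_sample_data_init() now takes the sample period as a third argument, so overflow handlers no longer initialise the sample data first and patch data.period afterwards. A rough sketch of the before/after pattern; the handler name and surrounding flow are invented for illustration, only the two perf calls mirror the diff:

#include <linux/perf_event.h>

/* hypothetical arch overflow path, for illustration only */
static void example_pmu_overflow(struct perf_event *event, struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;

	/*
	 * Old pattern, removed by this series:
	 *	perf_sample_data_init(&data, 0);
	 *	data.period = event->hw.last_period;
	 * New pattern: address and period are supplied in one call.
	 */
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (perf_event_overflow(event, &data, regs))
		;	/* the real handlers stop or reprogram the counter here */
}
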
@@ -11,7 +11,7 @@ CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_TIMERFD is not set
 # CONFIG_EVENTFD is not set
 # CONFIG_AIO is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set

@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);

-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,

 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;

@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)

 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;

@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)

 	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)

 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;

@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)

 	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)

 		hwc = &event->hw;
 		armpmu_event_update(event, hwc, idx);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!armpmu_event_set_period(event, hwc, idx))
 			continue;

@@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void)

 	regs = get_irq_regs();

-	perf_sample_data_init(&data, 0);
+	perf_sample_data_init(&data, 0, 0);

 	switch (counters) {
 #define HANDLE_COUNTER(n) \

@@ -32,7 +32,7 @@ CONFIG_RD_LZMA=y
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y

@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y

@@ -9,7 +9,7 @@ CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y

@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	if (record) {
 		struct perf_sample_data data;

-		perf_sample_data_init(&data, ~0ULL);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

 		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
 			perf_get_data_addr(regs, &data.addr);

@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	if (record) {
 		struct perf_sample_data data;

-		perf_sample_data_init(&data, 0);
-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, event->hw.last_period);

 		if (perf_event_overflow(event, &data, regs))
 			fsl_emb_pmu_stop(event, 0);

@@ -5,7 +5,7 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y

@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_PROFILING=y

@@ -1296,8 +1296,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,

 	regs = args->regs;

-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);

 	/* If the PMU has the TOE IRQ enable bits, we need to do a
@@ -1321,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 		if (val & (1ULL << 31))
 			continue;

-		data.period = event->hw.last_period;
+		perf_sample_data_init(&data, 0, hwc->last_period);
 		if (!sparc_perf_event_set_period(event, hwc, idx))
 			continue;

@@ -40,7 +40,6 @@ config X86
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_KVM
 	select HAVE_ARCH_KGDB

@@ -34,6 +34,7 @@

 #ifndef __ASSEMBLY__
 extern void mcount(void);
+extern int modifying_ftrace_code;

 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
@@ -50,6 +51,8 @@ struct dyn_arch_ftrace {
 	/* No extra data needed for x86 */
 };

+int ftrace_int3_handler(struct pt_regs *regs);
+
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */

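The two declarations added above (modifying_ftrace_code and ftrace_int3_handler()) belong to the breakpoint-based scheme x86 now uses to rewrite mcount call sites, which is also why the Kconfig hunk earlier drops HAVE_FTRACE_NMI_ENTER. A minimal sketch of how a trap path can consult them; the wrapper function is invented for illustration and is not part of the diff:

#include <linux/ftrace.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

#ifdef CONFIG_DYNAMIC_FTRACE
/* hypothetical helper: non-zero if this int3 came from ftrace text patching */
static int int3_is_ftrace_patching(struct pt_regs *regs)
{
	/* modifying_ftrace_code is non-zero only while ftrace rewrites text */
	if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs))
		return 1;	/* ftrace_int3_handler() moved regs->ip past the breakpoint */
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
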
@@ -134,6 +134,8 @@
 #define MSR_AMD64_IBSFETCHCTL 0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD 0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
+#define MSR_AMD64_IBSFETCH_REG_COUNT 3
+#define MSR_AMD64_IBSFETCH_REG_MASK ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
 #define MSR_AMD64_IBSOPCTL 0xc0011033
 #define MSR_AMD64_IBSOPRIP 0xc0011034
 #define MSR_AMD64_IBSOPDATA 0xc0011035
@@ -141,8 +143,11 @@
 #define MSR_AMD64_IBSOPDATA3 0xc0011037
 #define MSR_AMD64_IBSDCLINAD 0xc0011038
 #define MSR_AMD64_IBSDCPHYSAD 0xc0011039
+#define MSR_AMD64_IBSOP_REG_COUNT 7
+#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
 #define MSR_AMD64_IBSCTL 0xc001103a
 #define MSR_AMD64_IBSBRTARGET 0xc001103b
+#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */

 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL 0xc0010200

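The new *_REG_COUNT and *_REG_MASK constants describe the IBS fetch and op register groups as runs of consecutive MSRs starting at their control register, which is what lets a handler copy a whole group at once (for example into a raw sample). A hedged sketch assuming only the constants above and rdmsrl(); the function name and buffer layout are invented:

#include <linux/types.h>
#include <asm/msr.h>

/* illustrative only: read the 7 consecutive IBS op MSRs into buf[0..6] */
static void example_read_ibs_op_regs(u64 *buf)
{
	int i;

	for (i = 0; i < MSR_AMD64_IBSOP_REG_COUNT; i++)
		rdmsrl(MSR_AMD64_IBSOPCTL + i, buf[i]);
}
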
@@ -158,6 +158,7 @@ struct x86_pmu_capability {
 #define IBS_CAPS_OPCNT (1U<<4)
 #define IBS_CAPS_BRNTRGT (1U<<5)
 #define IBS_CAPS_OPCNTEXT (1U<<6)
+#define IBS_CAPS_RIPINVALIDCHK (1U<<7)

 #define IBS_CAPS_DEFAULT (IBS_CAPS_AVAIL \
 	| IBS_CAPS_FETCHSAM \
@@ -170,21 +171,28 @@ struct x86_pmu_capability {
 #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK 0x0F

-/* IbsFetchCtl bits/masks */
+/* ibs fetch bits/masks */
 #define IBS_FETCH_RAND_EN (1ULL<<57)
 #define IBS_FETCH_VAL (1ULL<<49)
 #define IBS_FETCH_ENABLE (1ULL<<48)
 #define IBS_FETCH_CNT 0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT 0x0000FFFFULL

-/* IbsOpCtl bits */
+/* ibs op bits/masks */
+/* lower 4 bits of the current count are ignored: */
 #define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
 #define IBS_OP_CNT_CTL (1ULL<<19)
 #define IBS_OP_VAL (1ULL<<18)
 #define IBS_OP_ENABLE (1ULL<<17)
 #define IBS_OP_MAX_CNT 0x0000FFFFULL
 #define IBS_OP_MAX_CNT_EXT 0x007FFFFFULL /* not a register bit mask */
+#define IBS_RIP_INVALID (1ULL<<38)

 #ifdef CONFIG_X86_LOCAL_APIC
 extern u32 get_ibs_caps(void);
 #else
 static inline u32 get_ibs_caps(void) { return 0; }
 #endif

 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);

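For context on the op-side masks above: the sampling interval is programmed through the IBS_OP_MAX_CNT field (the lower 4 bits of the count are ignored), IBS_OP_CNT_CTL switches the counter from cycles to dispatched ops (only when IBS_CAPS_OPCNT is advertised), and IBS_OP_ENABLE starts it. A rough, hypothetical sketch of composing an IbsOpCtl value from these defines; it is not code from the IBS driver:

#include <linux/types.h>
#include <asm/perf_event.h>

/* illustrative only: build an IbsOpCtl value for a given sample period */
static u64 example_ibs_op_config(u64 sample_period, bool count_dispatched_ops)
{
	u64 config = (sample_period >> 4) & IBS_OP_MAX_CNT;	/* low 4 bits ignored */

	if (count_dispatched_ops)	/* requires IBS_CAPS_OPCNT */
		config |= IBS_OP_CNT_CTL;

	return config | IBS_OP_ENABLE;
}
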
@@ -484,9 +484,6 @@ static int __x86_pmu_event_init(struct perf_event *event)

 	/* mark unused */
 	event->hw.extra_reg.idx = EXTRA_REG_NONE;
-
-	/* mark not used */
-	event->hw.extra_reg.idx = EXTRA_REG_NONE;
 	event->hw.branch_reg.idx = EXTRA_REG_NONE;

 	return x86_pmu.hw_config(event);
@@ -1186,8 +1183,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	int idx, handled = 0;
 	u64 val;

-	perf_sample_data_init(&data, 0);
-
 	cpuc = &__get_cpu_var(cpu_hw_events);

 	/*
@@ -1222,7 +1217,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
 	 * event overflow
 	 */
 	handled++;
-	data.period = event->hw.last_period;
+	perf_sample_data_init(&data, 0, event->hw.last_period);

 	if (!x86_perf_event_set_period(event))
 		continue;

Some files were not shown because too many files have changed in this diff.