Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf changes from Ingo Molnar:
"Kernel side changes:
- Consolidate the PMU interrupt-disabled code amongst architectures
(Vince Weaver)
- misc fixes
Tooling changes (new features, user visible changes):
- Add support for pagefault tracing in 'trace', please see multiple
examples in the changeset messages (Stanislav Fomichev).
- Add header for columns in 'top' and 'report' TUI browsers (Jiri
Olsa)
- Add pagefault statistics in 'trace' (Stanislav Fomichev)
- Add IO mode into timechart command (Stanislav Fomichev)
- Fallback to syscalls:* when raw_syscalls:* is not available in the
perl and python perf scripts. (Daniel Bristot de Oliveira)
- Add --repeat global option to 'perf bench' to be used in benchmarks
such as the existing 'futex' one, that was modified to use it
instead of a local option. (Davidlohr Bueso)
- Fix fd -> pathname resolution in 'trace', be it using /proc or a
vfs_getname probe point. (Arnaldo Carvalho de Melo)
- Add suggestion of how to set perf_event_paranoid sysctl, to help
non-root users trying tools like 'trace' to get a working
environment. (Arnaldo Carvalho de Melo)
- Updates from trace-cmd for traceevent plugin_kvm plus args cleanup
(Steven Rostedt, Jan Kiszka)
- Support S/390 in 'perf kvm stat' (Alexander Yarygin)
Tooling infrastructure changes:
- Allow reserving a row for header purposes in the hists browser
(Arnaldo Carvalho de Melo)
- Various fixes and prep work related to supporting Intel PT (Adrian
Hunter)
- Introduce multiple debug variables control (Jiri Olsa)
- Add callchain and additional sample information for python scripts
(Joseph Schuchart)
- More prep work to support Intel PT: (Adrian Hunter)
- Polishing 'script' BTS output
- 'inject' can specify --kallsyms
- VDSO is per machine, not a global var
- Expose data addr lookup functions previously private to 'script'
- Large mmap fixes in events processing
- Include standard stringify macros in powerpc code (Sukadev
Bhattiprolu)
Tooling cleanups:
- Convert open coded equivalents to asprintf() (Andy Shevchenko)
- Remove needless reassignments in 'trace' (Arnaldo Carvalho de Melo)
- Cache the is_exit syscall test in 'trace' (Arnaldo Carvalho de
Melo)
- No need to reimplement err() in 'perf bench sched-messaging', drop
barf(). (Davidlohr Bueso).
- Remove ev_name argument from perf_evsel__hists_browse, can be
obtained from the other parameters. (Jiri Olsa)
Tooling fixes:
- Fix memory leak in the 'sched-messaging' perf bench test.
(Davidlohr Bueso)
- The -o and -n 'perf bench mem' options are mutually exclusive, emit
error when both are specified. (Davidlohr Bueso)
- Fix scrollbar refresh row index in the ui browser, problem exposed
now that headers will be added and will be allowed to be switched
on/off. (Jiri Olsa)
- Handle the num array type in python properly (Sebastian Andrzej
Siewior)
- Fix wrong condition for allocation failure (Jiri Olsa)
- Adjust callchain based on DWARF debug info on powerpc (Sukadev
Bhattiprolu)
- Fix a risk for doing free on uninitialized pointer in traceevent
lib (Rickard Strandqvist)
- Update attr test with PERF_FLAG_FD_CLOEXEC flag (Jiri Olsa)
- Enable close-on-exec flag on perf file descriptor (Yann Droneaud)
- Fix build on gcc 4.4.7 (Arnaldo Carvalho de Melo)
- Event ordering fixes (Jiri Olsa)"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (123 commits)
Revert "perf tools: Fix jump label always changing during tracing"
perf tools: Fix perf usage string leftover
perf: Check permission only for parent tracepoint event
perf record: Store PERF_RECORD_FINISHED_ROUND only for nonempty rounds
perf record: Always force PERF_RECORD_FINISHED_ROUND event
perf inject: Add --kallsyms parameter
perf tools: Expose 'addr' functions so they can be reused
perf session: Fix accounting of ordered samples queue
perf powerpc: Include util/util.h and remove stringify macros
perf tools: Fix build on gcc 4.4.7
perf tools: Add thread parameter to vdso__dso_findnew()
perf tools: Add dso__type()
perf tools: Separate the VDSO map name from the VDSO dso name
perf tools: Add vdso__new()
perf machine: Fix the lifetime of the VDSO temporary file
perf tools: Group VDSO global variables into a structure
perf session: Add ability to skip 4GiB or more
perf session: Add ability to 'skip' a non-piped event stream
perf tools: Pass machine to vdso__dso_findnew()
perf tools: Add dso__data_size()
...
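Two of the tooling items above ("Update attr test with PERF_FLAG_FD_CLOEXEC flag" and "Enable close-on-exec flag on perf file descriptor") concern a perf_event_open() flag that is visible to user space. A small, self-contained sketch of how a tool can request it and fall back on older kernels that reject unknown flags; the helper name and counter choice here are illustrative, not the perf tool's own code:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <errno.h>

    /* perf_event_open() has no glibc wrapper; go through syscall(2). */
    static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                   int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int open_cycles_counter_cloexec(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            /* Ask for a close-on-exec fd atomically, without a separate fcntl() race. */
            fd = sys_perf_event_open(&attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
            if (fd < 0 && errno == EINVAL)
                    /* Older kernel that does not know the flag: retry without it. */
                    fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

            return fd;
    }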
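The kernel-side consolidation listed above ("Consolidate the PMU interrupt-disabled code amongst architectures") replaces per-driver "this PMU cannot sample" checks with a single capability flag that the core perf code tests when a sampling event is created. A minimal sketch of the pattern, distilled from the driver hunks in the diff below; the pmu name and init function are illustrative, not taken from any one architecture:

    #include <linux/perf_event.h>

    static struct pmu example_pmu = {
            /* .event_init/.add/.del/.start/.stop/.read as usual */
    };

    static int __init example_pmu_init(void)
    {
            /*
             * The counters have no overflow interrupt, so sampling cannot
             * work; advertise that once instead of open-coding an
             * is_sampling_event() rejection in event_init.
             */
            example_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

            return perf_pmu_register(&example_pmu, "example", PERF_TYPE_RAW);
    }

With the capability set, the core rejects sampling events against such a PMU at perf_event_open() time, which is the duplicated check being removed from the arc, blackfin, metag, s390 and sh drivers in the hunks that follow.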
@@ -99,10 +99,6 @@ static int arc_pmu_event_init(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int ret;

/* ARC 700 PMU does not support sampling events */
if (is_sampling_event(event))
return -ENOENT;

switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
if (event->attr.config >= PERF_COUNT_HW_MAX)

@@ -298,6 +294,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.read = arc_pmu_read,
};

/* ARC 700 PMU does not support sampling events */
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);

return ret;

@@ -389,14 +389,6 @@ static int bfin_pmu_event_init(struct perf_event *event)
if (attr->exclude_hv || attr->exclude_idle)
return -EPERM;

/*
* All of the on-chip counters are "limited", in that they have
* no interrupts, and are therefore unable to do sampling without
* further work and timer assistance.
*/
if (hwc->sample_period)
return -EINVAL;

ret = 0;
switch (attr->type) {
case PERF_TYPE_RAW:

@@ -490,6 +482,13 @@ static int __init bfin_pmu_init(void)
{
int ret;

/*
* All of the on-chip counters are "limited", in that they have
* no interrupts, and are therefore unable to do sampling without
* further work and timer assistance.
*/
pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
if (!ret)
perf_cpu_notifier(bfin_pmu_notifier);

@@ -567,16 +567,6 @@ static int _hw_perf_event_init(struct perf_event *event)
if (mapping == -1)
return -EINVAL;

/*
* Early cores have "limited" counters - they have no overflow
* interrupts - and so are unable to do sampling without extra work
* and timer assistance.
*/
if (metag_pmu->max_period == 0) {
if (hwc->sample_period)
return -EINVAL;
}

/*
* Don't assign an index until the event is placed into the hardware.
* -1 signifies that we're still deciding where to put it. On SMP

@@ -866,6 +856,15 @@ static int __init init_hw_perf_events(void)
pr_info("enabled with %s PMU driver, %d counters available\n",
metag_pmu->name, metag_pmu->max_events);

/*
* Early cores have "limited" counters - they have no overflow
* interrupts - and so are unable to do sampling without extra work
* and timer assistance.
*/
if (metag_pmu->max_period == 0) {
metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}

/* Initialise the active events and reservation mutex */
atomic_set(&metag_pmu->active_events, 0);
mutex_init(&metag_pmu->reserve_mutex);

@@ -387,8 +387,7 @@ static int h_24x7_event_init(struct perf_event *event)
event->attr.exclude_hv ||
event->attr.exclude_idle ||
event->attr.exclude_host ||
event->attr.exclude_guest ||
is_sampling_event(event)) /* no sampling */
event->attr.exclude_guest)
return -EINVAL;

/* no branch sampling */

@@ -513,6 +512,9 @@ static int hv_24x7_init(void)
if (!hv_page_cache)
return -ENOMEM;

/* sampling not supported */
h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
if (r)
return r;

@@ -210,8 +210,7 @@ static int h_gpci_event_init(struct perf_event *event)
event->attr.exclude_hv ||
event->attr.exclude_idle ||
event->attr.exclude_host ||
event->attr.exclude_guest ||
is_sampling_event(event)) /* no sampling */
event->attr.exclude_guest)
return -EINVAL;

/* no branch sampling */

@@ -284,6 +283,9 @@ static int hv_gpci_init(void)
return -ENODEV;
}

/* sampling not supported */
h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
if (r)
return r;

@@ -16,6 +16,7 @@ header-y += ioctls.h
header-y += ipcbuf.h
header-y += kvm.h
header-y += kvm_para.h
header-y += kvm_perf.h
header-y += kvm_virtio.h
header-y += mman.h
header-y += monwriter.h

@@ -0,0 +1,25 @@
/*
* Definitions for perf-kvm on s390
*
* Copyright 2014 IBM Corp.
* Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*/

#ifndef __LINUX_KVM_PERF_S390_H
#define __LINUX_KVM_PERF_S390_H

#include <asm/sie.h>

#define DECODE_STR_LEN 40

#define VCPU_ID "id"

#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter"
#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit"
#define KVM_EXIT_REASON "icptcode"

#endif

@@ -411,12 +411,6 @@ static int cpumf_pmu_event_init(struct perf_event *event)
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
case PERF_TYPE_RAW:
/* The CPU measurement counter facility does not have overflow
* interrupts to do sampling. Sampling must be provided by
* external means, for example, by timers.
*/
if (is_sampling_event(event))
return -ENOENT;
err = __hw_perf_event_init(event);
break;
default:

@@ -681,6 +675,12 @@ static int __init cpumf_pmu_init(void)
goto out;
}

/* The CPU measurement counter facility does not have overflow
* interrupts to do sampling. Sampling must be provided by
* external means, for example, by timers.
*/
cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {

@@ -128,14 +128,6 @@ static int __hw_perf_event_init(struct perf_event *event)
if (!sh_pmu_initialized())
return -ENODEV;

/*
* All of the on-chip counters are "limited", in that they have
* no interrupts, and are therefore unable to do sampling without
* further work and timer assistance.
*/
if (hwc->sample_period)
return -EINVAL;

/*
* See if we need to reserve the counter.
*

@@ -392,6 +384,13 @@ int register_sh_pmu(struct sh_pmu *_pmu)
pr_info("Performance Events: %s support registered\n", _pmu->name);

/*
* All of the on-chip counters are "limited", in that they have
* no interrupts, and are therefore unable to do sampling without
* further work and timer assistance.
*/
pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

WARN_ON(_pmu->num_events > MAX_HWEVENTS);

perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

@@ -22,6 +22,7 @@ header-y += ipcbuf.h
header-y += ist.h
header-y += kvm.h
header-y += kvm_para.h
header-y += kvm_perf.h
header-y += ldt.h
header-y += mce.h
header-y += mman.h

@@ -0,0 +1,16 @@
#ifndef _ASM_X86_KVM_PERF_H
#define _ASM_X86_KVM_PERF_H

#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>

#define DECODE_STR_LEN 20

#define VCPU_ID "vcpu_id"

#define KVM_ENTRY_TRACE "kvm:kvm_entry"
#define KVM_EXIT_TRACE "kvm:kvm_exit"
#define KVM_EXIT_REASON "exit_reason"

#endif /* _ASM_X86_KVM_PERF_H */

@@ -294,31 +294,41 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
cpu_to_node(cpu));
}

static void amd_uncore_cpu_up_prepare(unsigned int cpu)
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
struct amd_uncore *uncore;
struct amd_uncore *uncore_nb = NULL, *uncore_l2;

if (amd_uncore_nb) {
uncore = amd_uncore_alloc(cpu);
uncore->cpu = cpu;
uncore->num_counters = NUM_COUNTERS_NB;
uncore->rdpmc_base = RDPMC_BASE_NB;
uncore->msr_base = MSR_F15H_NB_PERF_CTL;
uncore->active_mask = &amd_nb_active_mask;
uncore->pmu = &amd_nb_pmu;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
uncore_nb = amd_uncore_alloc(cpu);
if (!uncore_nb)
goto fail;
uncore_nb->cpu = cpu;
uncore_nb->num_counters = NUM_COUNTERS_NB;
uncore_nb->rdpmc_base = RDPMC_BASE_NB;
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
uncore_nb->active_mask = &amd_nb_active_mask;
uncore_nb->pmu = &amd_nb_pmu;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
}

if (amd_uncore_l2) {
uncore = amd_uncore_alloc(cpu);
uncore->cpu = cpu;
uncore->num_counters = NUM_COUNTERS_L2;
uncore->rdpmc_base = RDPMC_BASE_L2;
uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore->active_mask = &amd_l2_active_mask;
uncore->pmu = &amd_l2_pmu;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
uncore_l2 = amd_uncore_alloc(cpu);
if (!uncore_l2)
goto fail;
uncore_l2->cpu = cpu;
uncore_l2->num_counters = NUM_COUNTERS_L2;
uncore_l2->rdpmc_base = RDPMC_BASE_L2;
uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore_l2->active_mask = &amd_l2_active_mask;
uncore_l2->pmu = &amd_l2_pmu;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
}

return 0;

fail:
kfree(uncore_nb);
return -ENOMEM;
}

static struct amd_uncore *

@@ -441,7 +451,7 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
if (!--uncore->refcnt)
kfree(uncore);
*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
*per_cpu_ptr(uncores, cpu) = NULL;
}

static void amd_uncore_cpu_dead(unsigned int cpu)

@@ -461,7 +471,8 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
amd_uncore_cpu_up_prepare(cpu);
if (amd_uncore_cpu_up_prepare(cpu))
return notifier_from_errno(-ENOMEM);
break;

case CPU_STARTING:

@@ -501,20 +512,33 @@ static void __init init_cpu_already_online(void *dummy)
amd_uncore_cpu_online(cpu);
}

static void cleanup_cpu_online(void *dummy)
{
unsigned int cpu = smp_processor_id();

amd_uncore_cpu_dead(cpu);
}

static int __init amd_uncore_init(void)
{
unsigned int cpu;
unsigned int cpu, cpu2;
int ret = -ENODEV;

if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
goto fail_nodev;

if (!cpu_has_topoext)
return -ENODEV;
goto fail_nodev;

if (cpu_has_perfctr_nb) {
amd_uncore_nb = alloc_percpu(struct amd_uncore *);
perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
if (!amd_uncore_nb) {
ret = -ENOMEM;
goto fail_nb;
}
ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
if (ret)
goto fail_nb;

printk(KERN_INFO "perf: AMD NB counters detected\n");
ret = 0;

@@ -522,20 +546,28 @@ static int __init amd_uncore_init(void)
if (cpu_has_perfctr_l2) {
amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
if (!amd_uncore_l2) {
ret = -ENOMEM;
goto fail_l2;
}
ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
if (ret)
goto fail_l2;

printk(KERN_INFO "perf: AMD L2I counters detected\n");
ret = 0;
}

if (ret)
return -ENODEV;
goto fail_nodev;

cpu_notifier_register_begin();

/* init cpus already online before registering for hotplug notifier */
for_each_online_cpu(cpu) {
amd_uncore_cpu_up_prepare(cpu);
ret = amd_uncore_cpu_up_prepare(cpu);
if (ret)
goto fail_online;
smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
}

@@ -543,5 +575,30 @@ static int __init amd_uncore_init(void)
cpu_notifier_register_done();

return 0;

fail_online:
for_each_online_cpu(cpu2) {
if (cpu2 == cpu)
break;
smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
}
cpu_notifier_register_done();

/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
amd_uncore_nb = amd_uncore_l2 = NULL;
if (cpu_has_perfctr_l2)
perf_pmu_unregister(&amd_l2_pmu);
fail_l2:
if (cpu_has_perfctr_nb)
perf_pmu_unregister(&amd_nb_pmu);
if (amd_uncore_l2)
free_percpu(amd_uncore_l2);
fail_nb:
if (amd_uncore_nb)
free_percpu(amd_uncore_nb);

fail_nodev:
return ret;
}
device_initcall(amd_uncore_init);

@@ -2947,10 +2947,7 @@ again:
* extra registers. If we failed to take an extra
* register, try the alternative.
*/
if (idx % 2)
idx--;
else
idx++;
idx ^= 1;
if (idx != reg1->idx % 6) {
if (idx == 2)
config1 >>= 8;

@@ -5266,6 +5266,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
goto got_name;
} else {
if (vma->vm_ops && vma->vm_ops->name) {
name = (char *) vma->vm_ops->name(vma);
if (name)
goto cpy_name;
}

name = (char *)arch_vma_name(vma);
if (name)
goto cpy_name;

@@ -7804,7 +7810,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
/*
* Initialize the perf_event context in task_struct
*/
int perf_event_init_context(struct task_struct *child, int ctxn)
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
struct perf_event_context *child_ctx, *parent_ctx;
struct perf_event_context *cloned_ctx;

@@ -30,6 +30,18 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
return ret;
}

/*
* We checked and allowed to create parent,
* allow children without checking.
*/
if (p_event->parent)
return 0;

/*
* It's ok to check current process (owner) permissions in here,
* because code below is called only via perf_event_open syscall.
*/

/* The ftrace function trace is allowed only for root. */
if (ftrace_event_is_function(tp_event)) {
if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))

@@ -2395,7 +2395,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
{
struct print_arg *field;
enum event_type type;
char *token;
char *token = NULL;

memset(arg, 0, sizeof(*arg));
arg->type = PRINT_FLAGS;

@@ -2448,7 +2448,7 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
{
struct print_arg *field;
enum event_type type;
char *token;
char *token = NULL;

memset(arg, 0, sizeof(*arg));
arg->type = PRINT_SYMBOL;

@@ -2487,7 +2487,7 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok)
{
struct print_arg *field;
enum event_type type;
char *token;
char *token = NULL;

memset(arg, 0, sizeof(*arg));
arg->type = PRINT_HEX;

@@ -5,8 +5,7 @@
#include "event-parse.h"

static unsigned long long
process___le16_to_cpup(struct trace_seq *s,
unsigned long long *args)
process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
{
uint16_t *val = (uint16_t *) (unsigned long) args[0];
return val ? (long long) le16toh(*val) : 0;

@@ -30,8 +30,7 @@
#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))

static unsigned long long
process_jbd2_dev_to_name(struct trace_seq *s,
unsigned long long *args)
process_jbd2_dev_to_name(struct trace_seq *s, unsigned long long *args)
{
unsigned int dev = args[0];

@@ -40,8 +39,7 @@ process_jbd2_dev_to_name(struct trace_seq *s,
}

static unsigned long long
process_jiffies_to_msecs(struct trace_seq *s,
unsigned long long *args)
process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
{
unsigned long long jiffies = args[0];

@@ -240,25 +240,38 @@ static const char *find_exit_reason(unsigned isa, int val)
for (i = 0; strings[i].val >= 0; i++)
if (strings[i].val == val)
break;
if (strings[i].str)
return strings[i].str;
return "UNKNOWN";

return strings[i].str;
}

static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
struct event_format *event, void *context)
static int print_exit_reason(struct trace_seq *s, struct pevent_record *record,
struct event_format *event, const char *field)
{
unsigned long long isa;
unsigned long long val;
unsigned long long info1 = 0, info2 = 0;
const char *reason;

if (pevent_get_field_val(s, event, "exit_reason", record, &val, 1) < 0)
if (pevent_get_field_val(s, event, field, record, &val, 1) < 0)
return -1;

if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0)
isa = 1;

trace_seq_printf(s, "reason %s", find_exit_reason(isa, val));
reason = find_exit_reason(isa, val);
if (reason)
trace_seq_printf(s, "reason %s", reason);
else
trace_seq_printf(s, "reason UNKNOWN (%llu)", val);
return 0;
}

static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
struct event_format *event, void *context)
{
unsigned long long info1 = 0, info2 = 0;

if (print_exit_reason(s, record, event, "exit_reason") < 0)
return -1;

pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);

@@ -313,6 +326,29 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
return 0;
}

static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct pevent_record *record,
struct event_format *event, void *context)
{
if (print_exit_reason(s, record, event, "exit_code") < 0)
return -1;

pevent_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
pevent_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
pevent_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
pevent_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);

return 0;
}

static int kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *record,
struct event_format *event, void *context)
{
pevent_print_num_field(s, "rip %llx ", event, "rip", record, 1);

return kvm_nested_vmexit_inject_handler(s, record, event, context);
}

union kvm_mmu_page_role {
unsigned word;
struct {

@@ -409,6 +445,12 @@ int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
pevent_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
kvm_emulate_insn_handler, NULL);

pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
kvm_nested_vmexit_handler, NULL);

pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
kvm_nested_vmexit_inject_handler, NULL);

pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
kvm_mmu_get_page_handler, NULL);

@@ -443,6 +485,12 @@ void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
kvm_emulate_insn_handler, NULL);

pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
kvm_nested_vmexit_handler, NULL);

pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
kvm_nested_vmexit_inject_handler, NULL);

pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
kvm_mmu_get_page_handler, NULL);

@@ -16,6 +16,10 @@ This 'perf bench' command is a general framework for benchmark suites.

COMMON OPTIONS
--------------
-r::
--repeat=::
Specify amount of times to repeat the run (default 10).

-f::
--format=::
Specify format style.
Some files were not shown because too many files have changed in this diff.