mirror of https://github.com/linux-apfs/linux-apfs.git
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner:
"This is the next part of the hotplug rework.
- Convert all notifiers with a priority assigned
- Convert all CPU_STARTING/DYING notifiers
The final removal of the STARTING/DYING infrastructure will happen
when the merge window closes.
Another 700 lines of impenetrable maze gone :)"
* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
timers/core: Correct callback order during CPU hot plug
leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
powerpc/numa: Convert to hotplug state machine
arm/perf: Fix hotplug state machine conversion
irqchip/armada: Avoid unused function warnings
ARC/time: Convert to hotplug state machine
clocksource/atlas7: Convert to hotplug state machine
clocksource/armada-370-xp: Convert to hotplug state machine
clocksource/exynos_mct: Convert to hotplug state machine
clocksource/arm_global_timer: Convert to hotplug state machine
rcu: Convert rcutree to hotplug state machine
KVM/arm/arm64/vgic-new: Convert to hotplug state machine
smp/cfd: Convert core to hotplug state machine
x86/x2apic: Convert to CPU hotplug state machine
profile: Convert to hotplug state machine
timers/core: Convert to hotplug state machine
hrtimer: Convert to hotplug state machine
x86/tboot: Convert to hotplug state machine
arm64/armv8 deprecated: Convert to hotplug state machine
hwtracing/coresight-etm4x: Convert to hotplug state machine
...
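The conversions in the diffs below all follow the same shape: a multiplexed CPU notifier that switched on CPU_STARTING/CPU_DYING is split into a pair of symmetric callbacks, registered once through cpuhp_setup_state() and invoked by the hotplug core on the CPU that is coming up or going down. A minimal before/after sketch for orientation; the foo_* names and the CPUHP_AP_FOO_STARTING state are illustrative only and appear nowhere in this series:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Old style: one notifier demultiplexing hotplug events by action code. */
static int foo_cpu_notify(struct notifier_block *self,
			  unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		foo_enable_this_cpu();	/* hypothetical per-CPU enable */
		break;
	case CPU_DYING:
		foo_disable_this_cpu();	/* hypothetical per-CPU disable */
		break;
	}
	return NOTIFY_OK;
}

/* New style: symmetric startup/teardown callbacks, one state, no switch. */
static int foo_starting_cpu(unsigned int cpu)
{
	foo_enable_this_cpu();
	return 0;
}

static int foo_dying_cpu(unsigned int cpu)
{
	foo_disable_this_cpu();
	return 0;
}

static int __init foo_init(void)
{
	/* The hotplug core runs foo_starting_cpu() on every online CPU. */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
				 foo_starting_cpu, foo_dying_cpu);
}

cpuhp_setup_state_nocalls() registers the same way but skips invoking the callback for CPUs that are already online, which several of the files below rely on when they set up the boot CPU by hand.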
+18 -30
@@ -296,30 +296,23 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int arc_timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static int arc_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
evt->cpumask = cpumask_of(smp_processor_id());
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
clockevents_config_and_register(evt, arc_timer_freq,
0, ULONG_MAX);
enable_percpu_irq(arc_timer_irq, 0);
break;
case CPU_DYING:
disable_percpu_irq(arc_timer_irq);
break;
}
return NOTIFY_OK;
clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
enable_percpu_irq(arc_timer_irq, 0);
return 0;
}
static struct notifier_block arc_timer_cpu_nb = {
.notifier_call = arc_timer_cpu_notify,
};
static int arc_timer_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(arc_timer_irq);
return 0;
}
/*
* clockevent setup for boot CPU
@@ -329,12 +322,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
int ret;
ret = register_cpu_notifier(&arc_timer_cpu_nb);
if (ret) {
pr_err("Failed to register cpu notifier");
return ret;
}
arc_timer_irq = irq_of_parse_and_map(node, 0);
if (arc_timer_irq <= 0) {
pr_err("clockevent: missing irq");
@@ -347,11 +334,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
return ret;
}
evt->irq = arc_timer_irq;
evt->cpumask = cpumask_of(smp_processor_id());
clockevents_config_and_register(evt, arc_timer_freq,
0, ARC_TIMER_MAX);
/* Needs apriori irq_set_percpu_devid() done in intc map function */
ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
"Timer0 (per-cpu-tick)", evt);
@@ -360,8 +342,14 @@ static int __init arc_clockevent_setup(struct device_node *node)
return ret;
}
enable_percpu_irq(arc_timer_irq, 0);
ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
"AP_ARC_TIMER_STARTING",
arc_timer_starting_cpu,
arc_timer_dying_cpu);
if (ret) {
pr_err("Failed to setup hotplug state");
return ret;
}
return 0;
}
+11 -20
@@ -310,24 +310,17 @@ static void twd_timer_setup(void)
enable_percpu_irq(clk->irq, 0);
}
static int twd_timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static int twd_timer_starting_cpu(unsigned int cpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
twd_timer_setup();
break;
case CPU_DYING:
twd_timer_stop();
break;
}
return NOTIFY_OK;
twd_timer_setup();
return 0;
}
static struct notifier_block twd_timer_cpu_nb = {
.notifier_call = twd_timer_cpu_notify,
};
static int twd_timer_dying_cpu(unsigned int cpu)
{
twd_timer_stop();
return 0;
}
static int __init twd_local_timer_common_register(struct device_node *np)
{
@@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np)
goto out_free;
}
err = register_cpu_notifier(&twd_timer_cpu_nb);
if (err)
goto out_irq;
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
"AP_ARM_TWD_STARTING",
twd_timer_starting_cpu, twd_timer_dying_cpu);
twd_get_clock(np);
if (!of_property_read_bool(np, "always-on"))
@@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np)
return 0;
out_irq:
free_percpu_irq(twd_ppi, twd_evt);
out_free:
iounmap(twd_base);
twd_base = NULL;
@@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
.notifier_call = mvebu_hwcc_notifier,
};
static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb,
unsigned long action, void *hcpu)
static int armada_xp_clear_l2_starting(unsigned int cpu)
{
if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
armada_xp_clear_shared_l2();
return NOTIFY_OK;
armada_xp_clear_shared_l2();
return 0;
}
static struct notifier_block armada_xp_clear_shared_l2_notifier = {
.notifier_call = armada_xp_clear_shared_l2_notifier_func,
.priority = 100,
};
static void __init armada_370_coherency_init(struct device_node *np)
{
struct resource res;
@@ -155,8 +147,9 @@ static void __init armada_370_coherency_init(struct device_node *np)
of_node_put(cpu_config_np);
register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
"AP_ARM_MVEBU_COHERENCY",
armada_xp_clear_l2_starting, NULL);
exit:
set_cpu_coherent();
}
+13 -14
@@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base)
L310_POWER_CTRL);
}
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
static int l2c310_starting_cpu(unsigned int cpu)
{
switch (act & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
break;
case CPU_DYING:
set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
break;
}
return NOTIFY_OK;
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
return 0;
}
static int l2c310_dying_cpu(unsigned int cpu)
{
set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
return 0;
}
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
@@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
cpu_notifier(l2c310_cpu_enable_flz, 0);
}
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
"AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
l2c310_dying_cpu);
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
+17 -11
@@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
* hardware state at every thread switch. We clear our held state when
* a CPU has been killed, indicating that the VFP hardware doesn't contain
* a threads VFP state. When a CPU starts up, we re-enable access to the
* VFP hardware.
*
* Both CPU_DYING and CPU_STARTING are called on the CPU which
* VFP hardware. The callbacks below are called on the CPU which
* is being offlined/onlined.
*/
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
void *hcpu)
static int vfp_dying_cpu(unsigned int cpu)
{
if (action == CPU_DYING || action == CPU_DYING_FROZEN)
vfp_current_hw_state[(long)hcpu] = NULL;
else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
vfp_enable(NULL);
return NOTIFY_OK;
vfp_force_reload(cpu, current_thread_info());
return 0;
}
static int vfp_starting_cpu(unsigned int unused)
{
vfp_enable(NULL);
return 0;
}
void vfp_kmode_exception(void)
@@ -732,6 +732,10 @@ static int __init vfp_init(void)
unsigned int vfpsid;
unsigned int cpu_arch = cpu_architecture();
/*
* Enable the access to the VFP on all online CPUs so the
* following test on FPSID will succeed.
*/
if (cpu_arch >= CPU_ARCH_ARMv6)
on_each_cpu(vfp_enable, NULL, 1);
@@ -794,7 +798,9 @@ static int __init vfp_init(void)
VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
}
hotcpu_notifier(vfp_hotplug, 0);
cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
"AP_ARM_VFP_STARTING", vfp_starting_cpu,
vfp_dying_cpu);
vfp_vector = vfp_support_entry;
+11 -30
@@ -153,12 +153,11 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
.notifier_call = xen_pvclock_gtod_notify,
};
static void xen_percpu_init(void)
static int xen_starting_cpu(unsigned int cpu)
{
struct vcpu_register_vcpu_info info;
struct vcpu_info *vcpup;
int err;
int cpu = get_cpu();
/*
* VCPUOP_register_vcpu_info cannot be called twice for the same
@@ -186,7 +185,13 @@ static void xen_percpu_init(void)
after_register_vcpu_info:
enable_percpu_irq(xen_events_irq, 0);
put_cpu();
return 0;
}
static int xen_dying_cpu(unsigned int cpu)
{
disable_percpu_irq(xen_events_irq);
return 0;
}
static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
@@ -205,28 +210,6 @@ static void xen_power_off(void)
BUG_ON(rc);
}
static int xen_cpu_notification(struct notifier_block *self,
unsigned long action,
void *hcpu)
{
switch (action) {
case CPU_STARTING:
xen_percpu_init();
break;
case CPU_DYING:
disable_percpu_irq(xen_events_irq);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block xen_cpu_notifier = {
.notifier_call = xen_cpu_notification,
};
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
xen_hvm_evtchn_do_upcall();
@@ -425,16 +408,14 @@ static int __init xen_guest_init(void)
return -EINVAL;
}
xen_percpu_init();
register_cpu_notifier(&xen_cpu_notifier);
xen_time_setup_guest();
if (xen_initial_domain())
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
return 0;
return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
"AP_ARM_XEN_STARTING", xen_starting_cpu,
xen_dying_cpu);
}
early_initcall(xen_guest_init);
@@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
* 0 - If all the hooks ran successfully.
* -EINVAL - At least one hook is not supported by the CPU.
*/
static int run_all_insn_set_hw_mode(unsigned long cpu)
static int run_all_insn_set_hw_mode(unsigned int cpu)
{
int rc = 0;
unsigned long flags;
@@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu)
list_for_each_entry(insn, &insn_emulation, node) {
bool enable = (insn->current_mode == INSN_HW);
if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
pr_warn("CPU[%ld] cannot support the emulation of %s",
pr_warn("CPU[%u] cannot support the emulation of %s",
cpu, insn->ops->name);
rc = -EINVAL;
}
@@ -611,20 +611,6 @@ static struct insn_emulation_ops setend_ops = {
.set_hw_mode = setend_set_hw_mode,
};
static int insn_cpu_hotplug_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
int rc = 0;
if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
return notifier_from_errno(rc);
}
static struct notifier_block insn_cpu_hotplug_notifier = {
.notifier_call = insn_cpu_hotplug_notify,
};
/*
* Invoked as late_initcall, since not needed before init spawned.
*/
@@ -643,7 +629,9 @@ static int __init armv8_deprecated_init(void)
pr_info("setend instruction emulation is not supported on the system");
}
register_cpu_notifier(&insn_cpu_hotplug_notifier);
cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
"AP_ARM64_ISNDEP_STARTING",
run_all_insn_set_hw_mode, NULL);
register_insn_emulation_sysctl(ctl_abi);
return 0;
@@ -453,29 +453,13 @@ static struct pmu pmu = {
.read = bfin_pmu_read,
};
static void bfin_pmu_setup(int cpu)
static int bfin_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
bfin_write_PFCTL(0);
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}
static int
bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
bfin_write_PFCTL(0);
bfin_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
return 0;
}
static int __init bfin_pmu_init(void)
@@ -491,8 +475,8 @@ static int __init bfin_pmu_init(void)
ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
if (!ret)
perf_cpu_notifier(bfin_pmu_notifier);
cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
bfin_pmu_prepare_cpu, NULL);
return ret;
}
early_initcall(bfin_pmu_init);
@@ -806,25 +806,16 @@ static struct metag_pmu _metag_pmu = {
};
/* PMU CPU hotplug notifier */
static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
void *hcpu)
static int metag_pmu_starting_cpu(unsigned int cpu)
{
unsigned int cpu = (unsigned int)hcpu;
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
memset(cpuc, 0, sizeof(struct cpu_hw_events));
raw_spin_lock_init(&cpuc->pmu_lock);
return NOTIFY_OK;
return 0;
}
static struct notifier_block metag_pmu_notifier = {
.notifier_call = metag_pmu_cpu_notify,
};
/* PMU Initialisation */
static int __init init_hw_perf_events(void)
{
@@ -876,16 +867,13 @@ static int __init init_hw_perf_events(void)
metag_out32(0, PERF_COUNT(0));
metag_out32(0, PERF_COUNT(1));
for_each_possible_cpu(cpu) {
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
"AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
NULL);
memset(cpuc, 0, sizeof(struct cpu_hw_events));
raw_spin_lock_init(&cpuc->pmu_lock);
}
register_cpu_notifier(&metag_pmu_notifier);
ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
out:
if (ret)
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING);
return ret;
}
early_initcall(init_hw_perf_events);
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
return handled;
}
static int loongson3_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
static int loongson3_starting_cpu(unsigned int cpu)
{
switch (action) {
case CPU_STARTING:
case CPU_STARTING_FROZEN:
write_c0_perflo1(reg.control1);
write_c0_perflo2(reg.control2);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
write_c0_perflo1(0xc0000000);
write_c0_perflo2(0x40000000);
break;
}
return NOTIFY_OK;
write_c0_perflo1(reg.control1);
write_c0_perflo2(reg.control2);
return 0;
}
static struct notifier_block loongson3_notifier_block = {
.notifier_call = loongson3_cpu_callback
};
static int loongson3_dying_cpu(unsigned int cpu)
{
write_c0_perflo1(0xc0000000);
write_c0_perflo2(0x40000000);
return 0;
}
static int __init loongson3_init(void)
{
on_each_cpu(reset_counters, NULL, 1);
register_hotcpu_notifier(&loongson3_notifier_block);
cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
"AP_MIPS_OP_LOONGSON3_STARTING",
loongson3_starting_cpu, loongson3_dying_cpu);
save_perf_irq = perf_irq;
perf_irq = loongson3_perfcount_handler;
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
static void loongson3_exit(void)
{
on_each_cpu(reset_counters, NULL, 1);
unregister_hotcpu_notifier(&loongson3_notifier_block);
cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
perf_irq = save_perf_irq;
}
+19 -29
@@ -581,30 +581,22 @@ static void verify_cpu_node_mapping(int cpu, int node)
}
}
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
void *hcpu)
/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
unsigned long lcpu = (unsigned long)hcpu;
int ret = NOTIFY_DONE, nid;
int nid;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
nid = numa_setup_cpu(lcpu);
verify_cpu_node_mapping((int)lcpu, nid);
ret = NOTIFY_OK;
break;
nid = numa_setup_cpu(cpu);
verify_cpu_node_mapping(cpu, nid);
return 0;
}
static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
unmap_cpu_from_node(lcpu);
ret = NOTIFY_OK;
break;
unmap_cpu_from_node(cpu);
#endif
}
return ret;
return 0;
}
/*
@@ -913,11 +905,6 @@ static void __init dump_numa_memory_topology(void)
}
}
static struct notifier_block ppc64_numa_nb = {
.notifier_call = cpu_numa_callback,
.priority = 1 /* Must run before sched domains notifier. */
};
/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
@@ -985,15 +972,18 @@ void __init initmem_init(void)
setup_node_to_cpumask_map();
reset_numa_cpu_lookup_table();
register_cpu_notifier(&ppc64_numa_nb);
/*
* We need the numa_cpu_lookup_table to be accurate for all CPUs,
* even before we online them, so that we can use cpu_to_{node,mem}
* early in boot, cf. smp_prepare_cpus().
* _nocalls() + manual invocation is used because cpuhp is not yet
* initialized for the boot CPU.
*/
for_each_present_cpu(cpu) {
numa_setup_cpu((unsigned long)cpu);
}
cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
for_each_present_cpu(cpu)
numa_setup_cpu(cpu);
}
static int __init early_numa(char *p)
@@ -2158,31 +2158,15 @@ static void perf_event_interrupt(struct pt_regs *regs)
irq_exit();
}
static void power_pmu_setup(int cpu)
int power_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
if (!ppmu)
return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
static int
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
power_pmu_setup(cpu);
break;
default:
break;
if (ppmu) {
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
return NOTIFY_OK;
return 0;
}
int register_power_pmu(struct power_pmu *pmu)
@@ -2205,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
#endif /* CONFIG_PPC64 */
perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(power_pmu_notifier);
cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
power_pmu_prepare_cpu, NULL);
return 0;
}
@@ -664,30 +664,22 @@ static struct pmu cpumf_pmu = {
.cancel_txn = cpumf_pmu_cancel_txn,
};
static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
void *hcpu)
static int cpumf_pmf_setup(unsigned int cpu, int flags)
{
int flags;
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
return 0;
}
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
flags = PMC_INIT;
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
case CPU_DOWN_PREPARE:
flags = PMC_RELEASE;
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
default:
break;
}
static int s390_pmu_online_cpu(unsigned int cpu)
{
return cpumf_pmf_setup(cpu, PMC_INIT);
}
return NOTIFY_OK;
static int s390_pmu_offline_cpu(unsigned int cpu)
{
return cpumf_pmf_setup(cpu, PMC_RELEASE);
}
static int __init cpumf_pmu_init(void)
@@ -707,7 +699,7 @@ static int __init cpumf_pmu_init(void)
if (rc) {
pr_err("Registering for CPU-measurement alerts "
"failed with rc=%i\n", rc);
goto out;
return rc;
}
cpumf_pmu.attr_groups = cpumf_cf_event_group();
@@ -716,10 +708,10 @@ static int __init cpumf_pmu_init(void)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
cpumf_measurement_alert);
goto out;
return rc;
}
perf_cpu_notifier(cpumf_pmu_notifier);
out:
return rc;
return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
"AP_PERF_S390_CF_ONLINE",
s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);
@@ -1504,37 +1504,28 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
sf_disable();
}
}
static int cpumf_pmu_notifier(struct notifier_block *self,
unsigned long action, void *hcpu)
static int cpusf_pmu_setup(unsigned int cpu, int flags)
{
int flags;
/* Ignore the notification if no events are scheduled on the PMU.
* This might be racy...
*/
if (!atomic_read(&num_events))
return NOTIFY_OK;
return 0;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
flags = PMC_INIT;
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
case CPU_DOWN_PREPARE:
flags = PMC_RELEASE;
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
break;
default:
break;
}
local_irq_disable();
setup_pmc_cpu(&flags);
local_irq_enable();
return 0;
}
return NOTIFY_OK;
static int s390_pmu_sf_online_cpu(unsigned int cpu)
{
return cpusf_pmu_setup(cpu, PMC_INIT);
}
static int s390_pmu_sf_offline_cpu(unsigned int cpu)
{
return cpusf_pmu_setup(cpu, PMC_RELEASE);
}
static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
@@ -1634,7 +1625,9 @@ static int __init init_cpum_sampling_pmu(void)
cpumf_measurement_alert);
goto out;
}
perf_cpu_notifier(cpumf_pmu_notifier);
cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
out:
return err;
}
@@ -352,28 +352,12 @@ static struct pmu pmu = {
.read = sh_pmu_read,
};
static void sh_pmu_setup(int cpu)
static int sh_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}
static int
sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
sh_pmu_setup(cpu);
break;
default:
break;
}
return NOTIFY_OK;
return 0;
}
int register_sh_pmu(struct sh_pmu *_pmu)
@@ -394,6 +378,7 @@ int register_sh_pmu(struct sh_pmu *_pmu)
WARN_ON(_pmu->num_events > MAX_HWEVENTS);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
perf_cpu_notifier(sh_pmu_notifier);
cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
NULL);
return 0;
}
@@ -331,15 +331,9 @@ static void vgetcpu_cpu_init(void *arg)
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
static int vgetcpu_online(unsigned int cpu)
{
long cpu = (long)arg;
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
return NOTIFY_DONE;
return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}
static int __init init_vdso(void)
@@ -350,15 +344,9 @@ static int __init init_vdso(void)
init_vdso_image(&vdso_image_x32);
#endif
cpu_notifier_register_begin();
on_each_cpu(vgetcpu_cpu_init, NULL, 1);
/* notifier priority > KVM */
__hotcpu_notifier(vgetcpu_cpu_notifier, 30);
cpu_notifier_register_done();
return 0;
return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
"AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */
@@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu)
WARN_ON_ONCE(cpuc->amd_nb);
if (!x86_pmu.amd_nb_constraints)
return NOTIFY_OK;
return 0;
cpuc->amd_nb = amd_alloc_nb(cpu);
if (!cpuc->amd_nb)
return NOTIFY_BAD;
return -ENOMEM;
return NOTIFY_OK;
return 0;
}
static void amd_pmu_cpu_starting(int cpu)
+28 -36
@@ -725,13 +725,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
static __init int perf_event_ibs_init(void)
static __init void perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
if (!ibs_caps)
return -ENODEV; /* ibs not supported by the cpu */
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -742,13 +739,11 @@ static __init int perf_event_ibs_init(void)
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
return 0;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
static __init int perf_event_ibs_init(void) { return 0; }
static __init void perf_event_ibs_init(void) { }
#endif
@@ -925,7 +920,7 @@ static inline int get_ibs_lvt_offset(void)
return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void *dummy)
static void setup_APIC_ibs(void)
{
int offset;
@@ -940,7 +935,7 @@ failed:
smp_processor_id());
}
static void clear_APIC_ibs(void *dummy)
static void clear_APIC_ibs(void)
{
int offset;
@@ -949,18 +944,24 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
setup_APIC_ibs();
return 0;
}
#ifdef CONFIG_PM
static int perf_ibs_suspend(void)
{
clear_APIC_ibs(NULL);
clear_APIC_ibs();
return 0;
}
static void perf_ibs_resume(void)
{
ibs_eilvt_setup();
setup_APIC_ibs(NULL);
setup_APIC_ibs();
}
static struct syscore_ops perf_ibs_syscore_ops = {
@@ -979,27 +980,15 @@ static inline void perf_ibs_pm_init(void) { }
#endif
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
setup_APIC_ibs(NULL);
break;
case CPU_DYING:
clear_APIC_ibs(NULL);
break;
default:
break;
}
return NOTIFY_OK;
clear_APIC_ibs();
return 0;
}
static __init int amd_ibs_init(void)
{
u32 caps;
int ret = -EINVAL;
caps = __get_ibs_caps();
if (!caps)
@@ -1008,22 +997,25 @@ static __init int amd_ibs_init(void)
ibs_eilvt_setup();
if (!ibs_eilvt_valid())
goto out;
return -EINVAL;
perf_ibs_pm_init();
cpu_notifier_register_begin();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
smp_mb();
smp_call_function(setup_APIC_ibs, NULL, 1);
__perf_cpu_notifier(perf_ibs_cpu_notifier);
cpu_notifier_register_done();
/*
* x86_pmu_amd_ibs_starting_cpu will be called from core on
* all online cpus.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
"AP_PERF_X86_AMD_IBS_STARTING",
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
ret = perf_event_ibs_init();
out:
if (ret)
pr_err("Failed to setup IBS, %d\n", ret);
return ret;
perf_event_ibs_init();
return 0;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
+12 -48
@@ -228,12 +228,12 @@ static struct pmu pmu_class = {
.read = pmu_event_read,
};
static void power_cpu_exit(int cpu)
static int power_cpu_exit(unsigned int cpu)
{
int target;
if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
return;
return 0;
/*
* Find a new CPU on the same compute unit, if was set in cpumask
@@ -245,9 +245,10 @@ static void power_cpu_exit(int cpu)
cpumask_set_cpu(target, &cpu_mask);
perf_pmu_migrate_context(&pmu_class, cpu, target);
}
return 0;
}
static void power_cpu_init(int cpu)
static int power_cpu_init(unsigned int cpu)
{
int target;
@@ -255,7 +256,7 @@ static void power_cpu_init(int cpu)
* 1) If any CPU is set at cpu_mask in the same compute unit, do
* nothing.
* 2) If no CPU is set at cpu_mask in the same compute unit,
* set current STARTING CPU.
* set current ONLINE CPU.
*
* Note: if there is a CPU aside of the new one already in the
* sibling mask, then it is also in cpu_mask.
@@ -263,33 +264,9 @@ static void power_cpu_init(int cpu)
target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
if (target >= nr_cpumask_bits)
cpumask_set_cpu(cpu, &cpu_mask);
return 0;
}
static int
power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_FAILED:
case CPU_STARTING:
power_cpu_init(cpu);
break;
case CPU_DOWN_PREPARE:
power_cpu_exit(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block power_cpu_notifier_nb = {
.notifier_call = power_cpu_notifier,
.priority = CPU_PRI_PERF,
};
static const struct x86_cpu_id cpu_match[] = {
{ .vendor = X86_VENDOR_AMD, .family = 0x15 },
{},
@@ -297,7 +274,7 @@ static const struct x86_cpu_id cpu_match[] = {
static int __init amd_power_pmu_init(void)
{
int cpu, target, ret;
int ret;
if (!x86_match_cpu(cpu_match))
return 0;
@@ -312,38 +289,25 @@ static int __init amd_power_pmu_init(void)
return -ENODEV;
}
cpu_notifier_register_begin();
/* Choose one online core of each compute unit. */
for_each_online_cpu(cpu) {
target = cpumask_first(topology_sibling_cpumask(cpu));
if (!cpumask_test_cpu(target, &cpu_mask))
cpumask_set_cpu(target, &cpu_mask);
}
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
"AP_PERF_X86_AMD_POWER_ONLINE",
power_cpu_init, power_cpu_exit);
ret = perf_pmu_register(&pmu_class, "power", -1);
if (WARN_ON(ret)) {
pr_warn("AMD Power PMU registration failed\n");
goto out;
return ret;
}
__register_cpu_notifier(&power_cpu_notifier_nb);
pr_info("AMD Power PMU detected\n");
out:
cpu_notifier_register_done();
return ret;
}
module_init(amd_power_pmu_init);
static void __exit amd_power_pmu_exit(void)
{
cpu_notifier_register_begin();
__unregister_cpu_notifier(&power_cpu_notifier_nb);
cpu_notifier_register_done();
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
perf_pmu_unregister(&pmu_class);
}
module_exit(amd_power_pmu_exit);
@@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
return this;
}
static void amd_uncore_cpu_starting(unsigned int cpu)
static int amd_uncore_cpu_starting(unsigned int cpu)
{
unsigned int eax, ebx, ecx, edx;
struct amd_uncore *uncore;
@@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu)
uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
}
return 0;
}
static void uncore_online(unsigned int cpu,
@@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu,
cpumask_set_cpu(cpu, uncore->active_mask);
}
static void amd_uncore_cpu_online(unsigned int cpu)
static int amd_uncore_cpu_online(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_online(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_online(cpu, amd_uncore_l2);
return 0;
}
static void uncore_down_prepare(unsigned int cpu,
@@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu,
}
}
static void amd_uncore_cpu_down_prepare(unsigned int cpu)
static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_down_prepare(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_down_prepare(cpu, amd_uncore_l2);
return 0;
}
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
@@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
*per_cpu_ptr(uncores, cpu) = NULL;
}
static void amd_uncore_cpu_dead(unsigned int cpu)
static int amd_uncore_cpu_dead(unsigned int cpu)
{
if (amd_uncore_nb)
uncore_dead(cpu, amd_uncore_nb);
if (amd_uncore_l2)
uncore_dead(cpu, amd_uncore_l2);
}
static int
amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (amd_uncore_cpu_up_prepare(cpu))
return notifier_from_errno(-ENOMEM);
break;
case CPU_STARTING:
amd_uncore_cpu_starting(cpu);
break;
case CPU_ONLINE:
amd_uncore_cpu_online(cpu);
break;
case CPU_DOWN_PREPARE:
amd_uncore_cpu_down_prepare(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
amd_uncore_cpu_dead(cpu);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block amd_uncore_cpu_notifier_block = {
.notifier_call = amd_uncore_cpu_notifier,
.priority = CPU_PRI_PERF + 1,
};
static void __init init_cpu_already_online(void *dummy)
{
unsigned int cpu = smp_processor_id();
amd_uncore_cpu_starting(cpu);
amd_uncore_cpu_online(cpu);
}
static void cleanup_cpu_online(void *dummy)
{
unsigned int cpu = smp_processor_id();
amd_uncore_cpu_dead(cpu);
return 0;
}
static int __init amd_uncore_init(void)
{
unsigned int cpu, cpu2;
int ret = -ENODEV;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -558,38 +509,29 @@ static int __init amd_uncore_init(void)
ret = 0;
}
if (ret)
goto fail_nodev;
cpu_notifier_register_begin();
/* init cpus already online before registering for hotplug notifier */
for_each_online_cpu(cpu) {
ret = amd_uncore_cpu_up_prepare(cpu);
if (ret)
goto fail_online;
smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
}
__register_cpu_notifier(&amd_uncore_cpu_notifier_block);
cpu_notifier_register_done();
/*
* Install callbacks. Core will call them for each online cpu.
*/
if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
"PERF_X86_AMD_UNCORE_PREP",
amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
goto fail_l2;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
"AP_PERF_X86_AMD_UNCORE_STARTING",
amd_uncore_cpu_starting, NULL))
goto fail_prep;
if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
"AP_PERF_X86_AMD_UNCORE_ONLINE",
amd_uncore_cpu_online,
amd_uncore_cpu_down_prepare))
goto fail_start;
return 0;
fail_online:
for_each_online_cpu(cpu2) {
if (cpu2 == cpu)
break;
smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
}
cpu_notifier_register_done();
/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
amd_uncore_nb = amd_uncore_l2 = NULL;
if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
perf_pmu_unregister(&amd_l2_pmu);
fail_start:
cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_l2:
if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
perf_pmu_unregister(&amd_nb_pmu);
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user