Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael J Wysocki:

 - Improved system suspend/resume and runtime PM handling for the SH
   TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).

 - Generic PM domains framework extensions related to cpuidle support
   and domain objects lookup using names.

 - ARM/shmobile power management updates including improved support for
   the SH7372's A4S power domain containing the CPU core.

 - cpufreq changes related to AMD CPUs support from Matthew Garrett,
   Andre Przywara and Borislav Petkov.

 - cpu0 cpufreq driver from Shawn Guo.

 - cpufreq governor fixes related to the relaxing of limit from Michal
   Pecio.

 - OMAP cpufreq updates from Axel Lin and Richard Zhao.

 - cpuidle ladder governor fixes related to the disabling of states
   from Carsten Emde and me.

 - Runtime PM core updates related to the interactions with the system
   suspend core from Alan Stern and Kevin Hilman.

 - Wakeup sources modification allowing more helper functions to be
   called from interrupt context from John Stultz and additional
   diagnostic code from Todd Poynor.

 - System suspend error code path fix from Feng Hong.

Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support
for hardware P-state chips. The changes were independent but somewhat
intertwined.

* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
  PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
  cpuidle: rename function name "__cpuidle_register_driver", v2
  cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
  cpuidle: remove some empty lines
  PM: Prevent runtime suspend during system resume
  PM QoS: Use spinlock in the per-device PM QoS constraints code
  PM / Sleep: use resume event when call dpm_resume_early
  cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
  ACPI / processor: remove pointless variable initialization
  ACPI / processor: remove unused function parameter
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed properly
  __init-annotate pm_sysrq_init()
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  ...
drivers/cpufreq/Kconfig
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
       If in doubt, say N.
 
+config GENERIC_CPUFREQ_CPU0
+    bool "Generic CPU0 cpufreq driver"
+    depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+    select CPU_FREQ_TABLE
+    help
+      This adds a generic cpufreq driver for CPU0 frequency management.
+      It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+      systems which share clock and voltage across all CPUs.
+
+      If in doubt, say N.
+
 menu "x86 CPU frequency scaling drivers"
     depends on X86
     source "drivers/cpufreq/Kconfig.x86"
drivers/cpufreq/Kconfig.x86
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
     help
       This driver adds a CPUFreq driver which utilizes the ACPI
       Processor Performance States.
-      This driver also supports Intel Enhanced Speedstep.
+      This driver also supports Intel Enhanced Speedstep and newer
+      AMD CPUs.
 
       To compile this driver as a module, choose M here: the
       module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
 
       If in doubt, say N.
 
+config X86_ACPI_CPUFREQ_CPB
+    default y
+    bool "Legacy cpb sysfs knob support for AMD CPUs"
+    depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+    help
+      The powernow-k8 driver used to provide a sysfs knob called "cpb"
+      to disable the Core Performance Boosting feature of AMD CPUs. This
+      file has now been superseded by the more generic "boost" entry.
+
+      By enabling this option the acpi_cpufreq driver provides the old
+      entry in addition to the new boost ones, for compatibility reasons.
+
 config ELAN_CPUFREQ
     tristate "AMD Elan SC400 and SC410"
     select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
     select CPU_FREQ_TABLE
     depends on ACPI && ACPI_PROCESSOR
     help
-      This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
+      This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+      Support for K10 and newer processors is now in acpi-cpufreq.
 
       To compile this driver as a module, choose M here: the
       module will be called powernow-k8.
drivers/cpufreq/Makefile
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)        += freq_table.o
 
+obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)  += cpufreq-cpu0.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)       += powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)       += powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)      += acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)       += pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)       += powernow-k6.o
drivers/cpufreq/acpi-cpufreq.c
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "
+
 enum {
     UNDEFINED_CAPABLE = 0,
     SYSTEM_INTEL_MSR_CAPABLE,
+    SYSTEM_AMD_MSR_CAPABLE,
     SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE     (0xffff)
+#define AMD_MSR_RANGE       (0x7)
+
+#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
 
 struct acpi_cpufreq_data {
     struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+    u32 lo, hi;
+    u64 msr;
+
+    switch (boot_cpu_data.x86_vendor) {
+    case X86_VENDOR_INTEL:
+        rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+        msr = lo | ((u64)hi << 32);
+        return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+    case X86_VENDOR_AMD:
+        rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+        msr = lo | ((u64)hi << 32);
+        return !(msr & MSR_K7_HWCR_CPB_DIS);
+    }
+    return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+    u32 cpu;
+    u32 msr_addr;
+    u64 msr_mask;
+
+    switch (boot_cpu_data.x86_vendor) {
+    case X86_VENDOR_INTEL:
+        msr_addr = MSR_IA32_MISC_ENABLE;
+        msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+        break;
+    case X86_VENDOR_AMD:
+        msr_addr = MSR_K7_HWCR;
+        msr_mask = MSR_K7_HWCR_CPB_DIS;
+        break;
+    default:
+        return;
+    }
+
+    rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+    for_each_cpu(cpu, cpumask) {
+        struct msr *reg = per_cpu_ptr(msrs, cpu);
+        if (enable)
+            reg->q &= ~msr_mask;
+        else
+            reg->q |= msr_mask;
+    }
+
+    wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+    int ret;
+    unsigned long val = 0;
+
+    if (!boost_supported)
+        return -EINVAL;
+
+    ret = kstrtoul(buf, 10, &val);
+    if (ret || (val > 1))
+        return -EINVAL;
+
+    if ((val && boost_enabled) || (!val && !boost_enabled))
+        return count;
+
+    get_online_cpus();
+
+    boost_set_msrs(val, cpu_online_mask);
+
+    put_online_cpus();
+
+    boost_enabled = val;
+    pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+    return count;
+}
+
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+                  const char *buf, size_t count)
+{
+    return _store_boost(buf, count);
+}
+
+static ssize_t show_global_boost(struct kobject *kobj,
+                 struct attribute *attr, char *buf)
+{
+    return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct global_attr global_boost = __ATTR(boost, 0644,
+                        show_global_boost,
+                        store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+             size_t count)
+{
+    return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+    return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
+
 static int check_est_cpu(unsigned int cpuid)
 {
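A quick way to verify what boost_set_msrs() wrote is to read MSR_K7_HWCR (0xc0010015) back from user space through the kernel's msr driver. A minimal sketch for an AMD box, assuming msr.ko is loaded and /dev/cpu/0/msr is readable as root; the helper is illustrative, not part of this commit:

    /* Illustrative only: report the Core Performance Boost disable bit
     * (bit 25, i.e. MSR_K7_HWCR_CPB_DIS above) for CPU 0. The msr driver
     * maps pread() offsets to MSR addresses. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0 || pread(fd, &val, sizeof(val), 0xc0010015) != sizeof(val)) {
            perror("msr");
            return 1;
        }
        printf("CPB %s\n", (val & (1ULL << 25)) ? "disabled" : "enabled");
        close(fd);
        return 0;
    }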
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
     return cpu_has(cpu, X86_FEATURE_EST);
 }
 
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+    struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+    return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
     struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
     int i;
     struct acpi_processor_performance *perf;
 
-    msr &= INTEL_MSR_RANGE;
+    if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+        msr &= AMD_MSR_RANGE;
+    else
+        msr &= INTEL_MSR_RANGE;
+
     perf = data->acpi_data;
 
     for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
     switch (data->cpu_feature) {
     case SYSTEM_INTEL_MSR_CAPABLE:
+    case SYSTEM_AMD_MSR_CAPABLE:
         return extract_msr(val, data);
     case SYSTEM_IO_CAPABLE:
         return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
 
     switch (cmd->type) {
     case SYSTEM_INTEL_MSR_CAPABLE:
+    case SYSTEM_AMD_MSR_CAPABLE:
         rdmsr(cmd->addr.msr.reg, cmd->val, h);
         break;
     case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
         lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
         wrmsr(cmd->addr.msr.reg, lo, hi);
         break;
+    case SYSTEM_AMD_MSR_CAPABLE:
+        wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+        break;
     case SYSTEM_IO_CAPABLE:
         acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
                    cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
         cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
         cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
         break;
+    case SYSTEM_AMD_MSR_CAPABLE:
+        cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+        cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+        break;
     case SYSTEM_IO_CAPABLE:
         cmd.type = SYSTEM_IO_CAPABLE;
         perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
         cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
         cmd.val = (u32) perf->states[next_perf_state].control;
         break;
+    case SYSTEM_AMD_MSR_CAPABLE:
+        cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+        cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+        cmd.val = (u32) perf->states[next_perf_state].control;
+        break;
     case SYSTEM_IO_CAPABLE:
         cmd.type = SYSTEM_IO_CAPABLE;
         cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
     free_percpu(acpi_perf_data);
 }
 
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+            void *hcpu)
+{
+    unsigned cpu = (long)hcpu;
+    const struct cpumask *cpumask;
+
+    cpumask = get_cpu_mask(cpu);
+
+    /*
+     * Clear the boost-disable bit on the CPU_DOWN path so that
+     * this cpu cannot block the remaining ones from boosting. On
+     * the CPU_UP path we simply keep the boost-disable flag in
+     * sync with the current global state.
+     */
+
+    switch (action) {
+    case CPU_UP_PREPARE:
+    case CPU_UP_PREPARE_FROZEN:
+        boost_set_msrs(boost_enabled, cpumask);
+        break;
+
+    case CPU_DOWN_PREPARE:
+    case CPU_DOWN_PREPARE_FROZEN:
+        boost_set_msrs(1, cpumask);
+        break;
+
+    default:
+        break;
+    }
+
+    return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+    .notifier_call      = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
         cpumask_copy(policy->cpus, cpu_core_mask(cpu));
     }
+
+    if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+        cpumask_clear(policy->cpus);
+        cpumask_set_cpu(cpu, policy->cpus);
+        cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
+        policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+        pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+    }
 #endif
 
     /* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         break;
     case ACPI_ADR_SPACE_FIXED_HARDWARE:
         pr_debug("HARDWARE addr space\n");
-        if (!check_est_cpu(cpu)) {
-            result = -ENODEV;
-            goto err_unreg;
+        if (check_est_cpu(cpu)) {
+            data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+            break;
         }
-        data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-        break;
+        if (check_amd_hwpstate_cpu(cpu)) {
+            data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+            break;
+        }
+        result = -ENODEV;
+        goto err_unreg;
     default:
         pr_debug("Unknown addr space %d\n",
              (u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct freq_attr *acpi_cpufreq_attr[] = {
     &cpufreq_freq_attr_scaling_available_freqs,
+    NULL,   /* this is a placeholder for cpb, do not remove */
     NULL,
 };
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
     .attr       = acpi_cpufreq_attr,
 };
 
+static void __init acpi_cpufreq_boost_init(void)
+{
+    if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+        msrs = msrs_alloc();
+
+        if (!msrs)
+            return;
+
+        boost_supported = true;
+        boost_enabled = boost_state(0);
+
+        get_online_cpus();
+
+        /* Force all MSRs to the same value */
+        boost_set_msrs(boost_enabled, cpu_online_mask);
+
+        register_cpu_notifier(&boost_nb);
+
+        put_online_cpus();
+    } else
+        global_boost.attr.mode = 0444;
+
+    /* We create the boost file in any case, though for systems without
+     * hardware support it will be read-only and hardwired to return 0.
+     */
+    if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+        pr_warn(PFX "could not register global boost sysfs file\n");
+    else
+        pr_debug("registered global boost sysfs file\n");
+}
+
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+    sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+    if (msrs) {
+        unregister_cpu_notifier(&boost_nb);
+
+        msrs_free(msrs);
+        msrs = NULL;
+    }
+}
+
 static int __init acpi_cpufreq_init(void)
 {
     int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
     if (ret)
         return ret;
 
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+    /* this is a sysfs file with a strange name and an even stranger
+     * semantic - per CPU instantiation, but system global effect.
+     * Lets enable it only on AMD CPUs for compatibility reasons and
+     * only if configured. This is considered legacy code, which
+     * will probably be removed at some point in the future.
+     */
+    if (check_amd_hwpstate_cpu(0)) {
+        struct freq_attr **iter;
+
+        pr_debug("adding sysfs entry for cpb\n");
+
+        for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+            ;
+
+        /* make sure there is a terminator behind it */
+        if (iter[1] == NULL)
+            *iter = &cpb;
+    }
+#endif
+
     ret = cpufreq_register_driver(&acpi_cpufreq_driver);
     if (ret)
         free_acpi_perf_data();
+    else
+        acpi_cpufreq_boost_init();
 
     return ret;
 }
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
     pr_debug("acpi_cpufreq_exit\n");
 
+    acpi_cpufreq_boost_exit();
+
     cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
     free_acpi_perf_data();
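Since global_boost is registered on cpufreq_global_kobject, the new knob lands at /sys/devices/system/cpu/cpufreq/boost, and with X86_ACPI_CPUFREQ_CPB set the legacy per-policy cpb file appears alongside it on AMD hardware. A hedged user-space sketch of reading or flipping the global file, assuming the usual sysfs mount point (the helper itself is not part of the commit):

    /* Illustrative only: read or toggle the global boost knob added
     * above. Writing reaches _store_boost(), which flips the
     * boost-disable MSR bit on every online CPU; reading reports the
     * cached boost_enabled value. */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        const char *path = "/sys/devices/system/cpu/cpufreq/boost";
        FILE *f = fopen(path, argc > 1 ? "w" : "r");
        char buf[4] = "";

        if (!f) {
            perror(path);
            return 1;
        }
        if (argc > 1)
            fputs(argv[1][0] == '0' ? "0" : "1", f);
        else if (fgets(buf, sizeof(buf), f))
            printf("boost: %s", buf);
        fclose(f);
        return 0;
    }

The cpb files are backed by the same _store_boost() handler, which is why the knob has system-wide effect despite its per-CPU instantiation, as the comment in acpi_cpufreq_init() notes.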
drivers/cpufreq/cpufreq-cpu0.c (new file; all lines added)
@@ -0,0 +1,269 @@
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * The OPP code in function cpu0_set_target() is reused from
 * drivers/cpufreq/omap-cpufreq.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

static unsigned int transition_latency;
static unsigned int voltage_tolerance; /* in percentage */

static struct device *cpu_dev;
static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;

static int cpu0_verify_speed(struct cpufreq_policy *policy)
{
    return cpufreq_frequency_table_verify(policy, freq_table);
}

static unsigned int cpu0_get_speed(unsigned int cpu)
{
    return clk_get_rate(cpu_clk) / 1000;
}

static int cpu0_set_target(struct cpufreq_policy *policy,
               unsigned int target_freq, unsigned int relation)
{
    struct cpufreq_freqs freqs;
    struct opp *opp;
    unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
    unsigned int index, cpu;
    int ret;

    ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
                         relation, &index);
    if (ret) {
        pr_err("failed to match target frequency %d: %d\n",
               target_freq, ret);
        return ret;
    }

    freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
    if (freq_Hz < 0)
        freq_Hz = freq_table[index].frequency * 1000;
    freqs.new = freq_Hz / 1000;
    freqs.old = clk_get_rate(cpu_clk) / 1000;

    if (freqs.old == freqs.new)
        return 0;

    for_each_online_cpu(cpu) {
        freqs.cpu = cpu;
        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
    }

    if (cpu_reg) {
        opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
        if (IS_ERR(opp)) {
            pr_err("failed to find OPP for %ld\n", freq_Hz);
            return PTR_ERR(opp);
        }
        volt = opp_get_voltage(opp);
        tol = volt * voltage_tolerance / 100;
        volt_old = regulator_get_voltage(cpu_reg);
    }

    pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
         freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
         freqs.new / 1000, volt ? volt / 1000 : -1);

    /* scaling up? scale voltage before frequency */
    if (cpu_reg && freqs.new > freqs.old) {
        ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
        if (ret) {
            pr_err("failed to scale voltage up: %d\n", ret);
            freqs.new = freqs.old;
            return ret;
        }
    }

    ret = clk_set_rate(cpu_clk, freqs.new * 1000);
    if (ret) {
        pr_err("failed to set clock rate: %d\n", ret);
        if (cpu_reg)
            regulator_set_voltage_tol(cpu_reg, volt_old, tol);
        return ret;
    }

    /* scaling down? scale voltage after frequency */
    if (cpu_reg && freqs.new < freqs.old) {
        ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
        if (ret) {
            pr_err("failed to scale voltage down: %d\n", ret);
            clk_set_rate(cpu_clk, freqs.old * 1000);
            freqs.new = freqs.old;
            return ret;
        }
    }

    for_each_online_cpu(cpu) {
        freqs.cpu = cpu;
        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
    }

    return 0;
}

static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
    int ret;

    if (policy->cpu != 0)
        return -EINVAL;

    ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
    if (ret) {
        pr_err("invalid frequency table: %d\n", ret);
        return ret;
    }

    policy->cpuinfo.transition_latency = transition_latency;
    policy->cur = clk_get_rate(cpu_clk) / 1000;

    /*
     * The driver only supports the SMP configuration where all processors
     * share the clock and voltage. Use cpufreq affected_cpus
     * interface to have all CPUs scaled together.
     */
    policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
    cpumask_setall(policy->cpus);

    cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

    return 0;
}

static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
{
    cpufreq_frequency_table_put_attr(policy->cpu);

    return 0;
}

static struct freq_attr *cpu0_cpufreq_attr[] = {
    &cpufreq_freq_attr_scaling_available_freqs,
    NULL,
};

static struct cpufreq_driver cpu0_cpufreq_driver = {
    .flags = CPUFREQ_STICKY,
    .verify = cpu0_verify_speed,
    .target = cpu0_set_target,
    .get = cpu0_get_speed,
    .init = cpu0_cpufreq_init,
    .exit = cpu0_cpufreq_exit,
    .name = "generic_cpu0",
    .attr = cpu0_cpufreq_attr,
};

static int __devinit cpu0_cpufreq_driver_init(void)
{
    struct device_node *np;
    int ret;

    np = of_find_node_by_path("/cpus/cpu@0");
    if (!np) {
        pr_err("failed to find cpu0 node\n");
        return -ENOENT;
    }

    cpu_dev = get_cpu_device(0);
    if (!cpu_dev) {
        pr_err("failed to get cpu0 device\n");
        ret = -ENODEV;
        goto out_put_node;
    }

    cpu_dev->of_node = np;

    cpu_clk = clk_get(cpu_dev, NULL);
    if (IS_ERR(cpu_clk)) {
        ret = PTR_ERR(cpu_clk);
        pr_err("failed to get cpu0 clock: %d\n", ret);
        goto out_put_node;
    }

    cpu_reg = regulator_get(cpu_dev, "cpu0");
    if (IS_ERR(cpu_reg)) {
        pr_warn("failed to get cpu0 regulator\n");
        cpu_reg = NULL;
    }

    ret = of_init_opp_table(cpu_dev);
    if (ret) {
        pr_err("failed to init OPP table: %d\n", ret);
        goto out_put_node;
    }

    ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
    if (ret) {
        pr_err("failed to init cpufreq table: %d\n", ret);
        goto out_put_node;
    }

    of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);

    if (of_property_read_u32(np, "clock-latency", &transition_latency))
        transition_latency = CPUFREQ_ETERNAL;

    if (cpu_reg) {
        struct opp *opp;
        unsigned long min_uV, max_uV;
        int i;

        /*
         * OPP is maintained in order of increasing frequency, and
         * freq_table initialised from OPP is therefore sorted in the
         * same order.
         */
        for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
            ;
        opp = opp_find_freq_exact(cpu_dev,
                freq_table[0].frequency * 1000, true);
        min_uV = opp_get_voltage(opp);
        opp = opp_find_freq_exact(cpu_dev,
                freq_table[i-1].frequency * 1000, true);
        max_uV = opp_get_voltage(opp);
        ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
        if (ret > 0)
            transition_latency += ret * 1000;
    }

    ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
    if (ret) {
        pr_err("failed register driver: %d\n", ret);
        goto out_free_table;
    }

    of_node_put(np);
    return 0;

out_free_table:
    opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
    of_node_put(np);
    return ret;
}
late_initcall(cpu0_cpufreq_driver_init);

MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
MODULE_LICENSE("GPL");
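The driver builds freq_table from whatever OPPs the OPP library holds for the cpu0 device; here they are populated from the device tree by of_init_opp_table() (the cpu@0 node's operating-points property, alongside the voltage-tolerance and clock-latency properties and the "cpu0" regulator supply read above). For comparison, board code could seed the same library programmatically with opp_add() before this driver's late_initcall runs — a sketch under that assumption, with made-up frequency/voltage pairs:

    /* Hypothetical board-code alternative to the DT "operating-points"
     * property: register (frequency, voltage) pairs against the cpu0
     * device so opp_init_cpufreq_table() can build freq_table from them. */
    #include <linux/cpu.h>
    #include <linux/init.h>
    #include <linux/opp.h>

    static int __init example_cpu0_opp_init(void)
    {
        struct device *dev = get_cpu_device(0);

        if (!dev)
            return -ENODEV;

        /* illustrative values: 396 MHz @ 950 mV, 792 MHz @ 1.10 V,
         * 996 MHz @ 1.275 V */
        opp_add(dev, 396000000, 950000);
        opp_add(dev, 792000000, 1100000);
        opp_add(dev, 996000000, 1275000);
        return 0;
    }
    core_initcall(example_cpu0_opp_init);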
drivers/cpufreq/cpufreq_conservative.c
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
             j_dbs_info->prev_cpu_nice =
                 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
         }
+        this_dbs_info->cpu = cpu;
         this_dbs_info->down_skip = 0;
         this_dbs_info->requested_freq = policy->cur;
 
@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
             __cpufreq_driver_target(
                     this_dbs_info->cur_policy,
                     policy->min, CPUFREQ_RELATION_L);
+        dbs_check_cpu(this_dbs_info);
         mutex_unlock(&this_dbs_info->timer_mutex);
 
         break;

drivers/cpufreq/cpufreq_ondemand.c
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         else if (policy->min > this_dbs_info->cur_policy->cur)
             __cpufreq_driver_target(this_dbs_info->cur_policy,
                         policy->min, CPUFREQ_RELATION_L);
+        dbs_check_cpu(this_dbs_info);
         mutex_unlock(&this_dbs_info->timer_mutex);
         break;
     }
drivers/cpufreq/longhaul.h (+13 −13)
@@ -56,7 +56,7 @@ union msr_longhaul
 /*
  * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
  */
-static const int __cpuinitdata samuel1_mults[16] = {
+static const int __cpuinitconst samuel1_mults[16] = {
     -1, /* 0000 -> RESERVED */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
     -1, /* 1111 -> RESERVED */
 };
 
-static const int __cpuinitdata samuel1_eblcr[16] = {
+static const int __cpuinitconst samuel1_eblcr[16] = {
     50, /* 0000 -> RESERVED */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __cpuinitdata samuel2_eblcr[16] = {
+static const int __cpuinitconst samuel2_eblcr[16] = {
     50, /* 0000 -> 5.0x */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
 /*
  * VIA C3 Ezra
  */
-static const int __cpuinitdata ezra_mults[16] = {
+static const int __cpuinitconst ezra_mults[16] = {
     100, /* 0000 -> 10.0x */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
     120, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata ezra_eblcr[16] = {
+static const int __cpuinitconst ezra_eblcr[16] = {
     50, /* 0000 -> 5.0x */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __cpuinitdata ezrat_mults[32] = {
+static const int __cpuinitconst ezrat_mults[32] = {
     100, /* 0000 -> 10.0x */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
     -1, /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __cpuinitdata ezrat_eblcr[32] = {
+static const int __cpuinitconst ezrat_eblcr[32] = {
     50, /* 0000 -> 5.0x */
     30, /* 0001 -> 3.0x */
     40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static const int __cpuinitdata nehemiah_mults[32] = {
+static const int __cpuinitconst nehemiah_mults[32] = {
     100, /* 0000 -> 10.0x */
     -1, /* 0001 -> 16.0x */
     40, /* 0010 -> 4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
     -1, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata nehemiah_eblcr[32] = {
+static const int __cpuinitconst nehemiah_eblcr[32] = {
     50, /* 0000 -> 5.0x */
     160, /* 0001 -> 16.0x */
     40, /* 0010 -> 4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
     unsigned short pos;
 };
 
-static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
     {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
     {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
     {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
     {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
 };
 
-static const unsigned char __cpuinitdata mV_vrm85[32] = {
+static const unsigned char __cpuinitconst mV_vrm85[32] = {
     0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
     0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
     0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
     0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
 };
 
-static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
     {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
     {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
     {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
     {675, 3}, {650, 2}, {625, 1}, {600, 0}
 };
 
-static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
     0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
     0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
     0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
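These one-word substitutions come from "sections: fix section conflicts in drivers/cpufreq" in the shortlog and are purely about section placement: a const-qualified object annotated __cpuinitdata would be emitted into the writable .cpuinit.data section, which the compiler rejects with a "causes a section type conflict" error, so const init data must use the read-only variant. The pattern, with an illustrative array name:

    /* Hypothetical example: const init data belongs in the read-only
     * CPU-init section, so pair const with __cpuinitconst, never with
     * __cpuinitdata (writable section -> section type conflict). */
    #include <linux/init.h>

    static const int __cpuinitconst example_mults[4] = { 30, 40, 50, 60 };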
drivers/cpufreq/omap-cpufreq.c
@@ -40,16 +40,6 @@
 /* OPP tolerance in percentage */
 #define OPP_TOLERANCE 4
 
-#ifdef CONFIG_SMP
-struct lpj_info {
-    unsigned long ref;
-    unsigned int freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
     }
 
     freqs.new = omap_getspeed(policy->cpu);
-#ifdef CONFIG_SMP
-    /*
-     * Note that loops_per_jiffy is not updated on SMP systems in
-     * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-     * on frequency transition. We need to update all dependent CPUs.
-     */
-    for_each_cpu(i, policy->cpus) {
-        struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-        if (!lpj->freq) {
-            lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-            lpj->freq = freqs.old;
-        }
-
-        per_cpu(cpu_data, i).loops_per_jiffy =
-            cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-    }
-
-    /* And don't forget to adjust the global one */
-    if (!global_lpj_ref.freq) {
-        global_lpj_ref.ref = loops_per_jiffy;
-        global_lpj_ref.freq = freqs.old;
-    }
-    loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-                    freqs.new);
-#endif
 
 done:
     /* notifiers */
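The block removed above kept each CPU's loops_per_jiffy calibration in step with the clock; per the shortlog, that duty moves into a generic ARM cpufreq transition notifier so individual drivers no longer duplicate it. The arithmetic cpufreq_scale() performed is plain linear rescaling, sketched here with illustrative names:

    /* Sketch of the rescaling done via cpufreq_scale(): loops_per_jiffy
     * is proportional to CPU clock, so scale the calibrated reference by
     * new_freq/ref_freq, using a 64-bit intermediate against overflow. */
    #include <linux/math64.h>

    static unsigned long scale_lpj(unsigned long ref_lpj,
                                   unsigned int ref_khz, unsigned int new_khz)
    {
        return (unsigned long)div_u64((u64)ref_lpj * new_khz, ref_khz);
    }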
@@ -301,9 +266,9 @@ static int __init omap_cpufreq_init(void)
     }
 
     mpu_dev = omap_device_get_by_hwmod_name("mpu");
-    if (!mpu_dev) {
+    if (IS_ERR(mpu_dev)) {
         pr_warning("%s: unable to get the mpu device\n", __func__);
-        return -EINVAL;
+        return PTR_ERR(mpu_dev);
     }
 
     mpu_reg = regulator_get(mpu_dev, "vcc");
(+41 −367) File diff suppressed because it is too large
drivers/cpufreq/powernow-k8.h
@@ -5,24 +5,11 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-    HW_PSTATE_INVALID = 0xff,
-    HW_PSTATE_0 = 0,
-    HW_PSTATE_1 = 1,
-    HW_PSTATE_2 = 2,
-    HW_PSTATE_3 = 3,
-    HW_PSTATE_4 = 4,
-    HW_PSTATE_5 = 5,
-    HW_PSTATE_6 = 6,
-    HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
     unsigned int cpu;
 
     u32 numps; /* number of p-states */
     u32 batps; /* number of p-states supported on battery */
-    u32 max_hw_pstate; /* maximum legal hardware pstate */
 
     /* these values are constant when the PSB is used to determine
      * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
     /* keep track of the current fid / vid or pstate */
     u32 currvid;
     u32 currfid;
-    enum pstate currpstate;
 
     /* the powernow_table includes all frequency and vid/fid pairings:
      * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID      0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN   0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE       0x00000080
-#define HW_PSTATE_MASK      0x00000007
-#define HW_PSTATE_VALID_MASK    0x80000000
-#define HW_PSTATE_MAX_MASK  0x000000f0
-#define HW_PSTATE_MAX_SHIFT 4
-#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS   0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL     0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT    0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
 
 /*
  * There are restrictions frequencies have to follow:
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);