Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (65 commits)
  ACPI: suppress power button event on S3 resume
  ACPI: resolve merge conflict between sem2mutex and processor_perflib.c
  ACPI: use for_each_possible_cpu() instead of for_each_cpu()
  ACPI: delete newly added debugging macros in processor_perflib.c
  ACPI: UP build fix for bugzilla-5737
  Enable P-state software coordination via _PDC
  P-state software coordination for speedstep-centrino
  P-state software coordination for acpi-cpufreq
  P-state software coordination for ACPI core
  ACPI: create acpi_thermal_resume()
  ACPI: create acpi_fan_suspend()/acpi_fan_resume()
  ACPI: pass pm_message_t from acpi_device_suspend() to root_suspend()
  ACPI: create acpi_device_suspend()/acpi_device_resume()
  ACPI: replace spin_lock_irq with mutex for ec poll mode
  ACPI: Allow a WAN module enable/disable on a Thinkpad X60.
  sem2mutex: acpi, acpi_link_lock
  ACPI: delete unused acpi_bus_drivers_lock
  sem2mutex: drivers/acpi/processor_perflib.c
  ACPI add ia64 exports to build acpi_memhotplug as a module
  ACPI: asus_acpi_init(): propagate correct return value
  ...

Manual resolve of conflicts in:

	arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
	arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
	include/acpi/processor.h
Linus Torvalds
2006-06-23 07:52:36 -07:00
177 changed files with 6455 additions and 5134 deletions
+3
@@ -147,6 +147,9 @@ running once the system is up.
acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA
Format: <irq>,<irq>...
acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
Format: To spoof as Windows 98: ="Microsoft Windows"
acpi_osi= [HW,ACPI] empty param disables _OSI
acpi_serialize [HW,ACPI] force serialization of AML methods
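For context, these options are passed on the kernel command line at boot; a hypothetical bootloader entry using the documented format (kernel path and root device invented for illustration) might read:

	kernel /boot/vmlinuz root=/dev/sda1 ro acpi_os_name="Microsoft Windows" acpi_serialize

Everything after the image name is handed to the kernel verbatim, and the quoted string is the example spoof value given in the text above.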
+4 -4
@@ -217,7 +217,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_madt *madt = NULL;
if (!phys_addr || !size)
if (!phys_addr || !size || !cpu_has_apic)
return -EINVAL;
madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -623,9 +623,9 @@ extern u32 pmtmr_ioport;
static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
{
struct fadt_descriptor_rev2 *fadt = NULL;
struct fadt_descriptor *fadt = NULL;
fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
if (!fadt) {
printk(KERN_WARNING PREFIX "Unable to map FADT\n");
return 0;
@@ -756,7 +756,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
return -ENODEV;
}
if (!cpu_has_apic)
if (!cpu_has_apic)
return -ENODEV;
/*
+1 -1
@@ -47,7 +47,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
+202 -89
@@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
struct cpufreq_acpi_io {
struct acpi_processor_performance acpi_data;
struct acpi_processor_performance *acpi_data;
struct cpufreq_frequency_table *freq_table;
unsigned int resume;
};
static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -104,64 +105,43 @@ acpi_processor_set_performance (
{
u16 port = 0;
u8 bit_width = 0;
int ret;
u32 value = 0;
int i = 0;
struct cpufreq_freqs cpufreq_freqs;
cpumask_t saved_mask;
int ret = 0;
u32 value = 0;
int retval;
struct acpi_processor_performance *perf;
dprintk("acpi_processor_set_performance\n");
/*
* TBD: Use something other than set_cpus_allowed.
* As set_cpus_allowed is a bit racy,
* with any other set_cpus_allowed for this process.
*/
saved_mask = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
if (smp_processor_id() != cpu) {
return (-EAGAIN);
}
if (state == data->acpi_data.state) {
retval = 0;
perf = data->acpi_data;
if (state == perf->state) {
if (unlikely(data->resume)) {
dprintk("Called after resume, resetting to P%d\n", state);
data->resume = 0;
} else {
dprintk("Already at target state (P%d)\n", state);
retval = 0;
goto migrate_end;
return (retval);
}
}
dprintk("Transitioning from P%d to P%d\n",
data->acpi_data.state, state);
/* cpufreq frequency struct */
cpufreq_freqs.cpu = cpu;
cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
cpufreq_freqs.new = data->freq_table[state].frequency;
/* notify cpufreq */
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
dprintk("Transitioning from P%d to P%d\n", perf->state, state);
/*
* First we write the target state's 'control' value to the
* control_register.
*/
port = data->acpi_data.control_register.address;
bit_width = data->acpi_data.control_register.bit_width;
value = (u32) data->acpi_data.states[state].control;
port = perf->control_register.address;
bit_width = perf->control_register.bit_width;
value = (u32) perf->states[state].control;
dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
ret = acpi_processor_write_port(port, bit_width, value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
retval = ret;
goto migrate_end;
return (ret);
}
/*
@@ -177,48 +157,35 @@ acpi_processor_set_performance (
* before giving up.
*/
port = data->acpi_data.status_register.address;
bit_width = data->acpi_data.status_register.bit_width;
port = perf->status_register.address;
bit_width = perf->status_register.bit_width;
dprintk("Looking for 0x%08x from port 0x%04x\n",
(u32) data->acpi_data.states[state].status, port);
(u32) perf->states[state].status, port);
for (i=0; i<100; i++) {
for (i = 0; i < 100; i++) {
ret = acpi_processor_read_port(port, bit_width, &value);
if (ret) {
dprintk("Invalid port width 0x%04x\n", bit_width);
retval = ret;
goto migrate_end;
return (ret);
}
if (value == (u32) data->acpi_data.states[state].status)
if (value == (u32) perf->states[state].status)
break;
udelay(10);
}
} else {
value = (u32) data->acpi_data.states[state].status;
value = (u32) perf->states[state].status;
}
/* notify cpufreq */
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
unsigned int tmp = cpufreq_freqs.new;
cpufreq_freqs.new = cpufreq_freqs.old;
cpufreq_freqs.old = tmp;
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
if (unlikely(value != (u32) perf->states[state].status)) {
printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
retval = -ENODEV;
goto migrate_end;
return (retval);
}
dprintk("Transition successful after %d microseconds\n", i * 10);
data->acpi_data.state = state;
retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
perf->state = state;
return (retval);
}
@@ -230,8 +197,17 @@ acpi_cpufreq_target (
unsigned int relation)
{
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
struct acpi_processor_performance *perf;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
cpumask_t saved_mask;
cpumask_t set_mask;
cpumask_t covered_cpus;
unsigned int cur_state = 0;
unsigned int next_state = 0;
unsigned int result = 0;
unsigned int j;
unsigned int tmp;
dprintk("acpi_cpufreq_setpolicy\n");
@@ -240,11 +216,95 @@ acpi_cpufreq_target (
target_freq,
relation,
&next_state);
if (result)
if (unlikely(result))
return (result);
result = acpi_processor_set_performance (data, policy->cpu, next_state);
perf = data->acpi_data;
cur_state = perf->state;
freqs.old = data->freq_table[cur_state].frequency;
freqs.new = data->freq_table[next_state].frequency;
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
online_policy_cpus = policy->cpus;
#endif
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
/*
* We need to call driver->target() on all or any CPU in
* policy->cpus, depending on policy->shared_type.
*/
saved_mask = current->cpus_allowed;
cpus_clear(covered_cpus);
for_each_cpu_mask(j, online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on CPU that wants to change freq
*/
cpus_clear(set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
cpus_or(set_mask, set_mask, online_policy_cpus);
else
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
result = -EAGAIN;
break;
}
result = acpi_processor_set_performance (data, j, next_state);
if (result) {
result = -EAGAIN;
break;
}
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpu_set(j, covered_cpus);
}
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
if (unlikely(result)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to online_policy_cpus and
* acpi_processor_set_performance() has been called on
* covered_cpus. Best-effort undo.
*/
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
policy->cpu = j;
acpi_processor_set_performance (data,
j,
cur_state);
}
}
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
set_cpus_allowed(current, saved_mask);
return (result);
}
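The rewritten target() above is the heart of the P-state software coordination work: PRECHANGE is sent to every online CPU in the policy, the driver then programs either a single CPU on behalf of the whole domain (CPUFREQ_SHARED_TYPE_ANY) or each CPU in turn, and a failure part-way through triggers a best-effort rollback followed by reversed notifications. The fragment below is a small self-contained model of that control flow, not the kernel code; the helper names and the fixed four-CPU domain are invented for illustration.

/* toy model of the coordination flow introduced by this merge */
#include <stdio.h>
#include <stdbool.h>

#define NCPUS 4
enum shared_type { SHARED_ANY, SHARED_ALL }; /* stand-ins for CPUFREQ_SHARED_TYPE_* */

static int pstate[NCPUS]; /* current P-state of each CPU in the domain */

/* stand-in for acpi_processor_set_performance(); returns 0 on success */
static int set_perf(int cpu, int state)
{
	pstate[cpu] = state;
	return 0;
}

static int do_target(enum shared_type st, int old_state, int new_state)
{
	bool covered[NCPUS] = { false };
	int cpu, ret = 0;

	for (cpu = 0; cpu < NCPUS; cpu++) /* PRECHANGE notified on every policy CPU */
		printf("cpu%d: P%d -> P%d\n", cpu, old_state, new_state);

	for (cpu = 0; cpu < NCPUS; cpu++) {
		ret = set_perf(cpu, new_state);
		if (ret)
			break; /* rolled back below, as in the patch */
		if (st == SHARED_ANY)
			break; /* one write covers the whole domain */
		covered[cpu] = true;
	}

	if (ret) /* best-effort undo on the CPUs already programmed */
		for (cpu = 0; cpu < NCPUS; cpu++)
			if (covered[cpu])
				set_perf(cpu, old_state);

	for (cpu = 0; cpu < NCPUS; cpu++) /* POSTCHANGE notified on every policy CPU */
		printf("cpu%d: now at P%d\n", cpu, pstate[cpu]);

	return ret;
}

int main(void)
{
	return do_target(SHARED_ALL, 0, 2);
}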
@@ -270,30 +330,65 @@ acpi_cpufreq_guess_freq (
struct cpufreq_acpi_io *data,
unsigned int cpu)
{
struct acpi_processor_performance *perf = data->acpi_data;
if (cpu_khz) {
/* search the closest match to cpu_khz */
unsigned int i;
unsigned long freq;
unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
unsigned long freqn = perf->states[0].core_frequency * 1000;
for (i=0; i < (data->acpi_data.state_count - 1); i++) {
for (i = 0; i < (perf->state_count - 1); i++) {
freq = freqn;
freqn = data->acpi_data.states[i+1].core_frequency * 1000;
freqn = perf->states[i+1].core_frequency * 1000;
if ((2 * cpu_khz) > (freqn + freq)) {
data->acpi_data.state = i;
perf->state = i;
return (freq);
}
}
data->acpi_data.state = data->acpi_data.state_count - 1;
perf->state = perf->state_count - 1;
return (freqn);
} else
} else {
/* assume CPU is at P0... */
data->acpi_data.state = 0;
return data->acpi_data.states[0].core_frequency * 1000;
perf->state = 0;
return perf->states[0].core_frequency * 1000;
}
}
/*
* acpi_cpufreq_early_init - initialize ACPI P-States library
*
* Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
* in order to determine correct frequency and voltage pairings. We can
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
static int acpi_cpufreq_early_init_acpi(void)
{
struct acpi_processor_performance *data;
unsigned int i, j;
dprintk("acpi_cpufreq_early_init\n");
for_each_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
return (-ENOMEM);
}
acpi_perf_data[i] = data;
}
/* Do initialization in ACPI core */
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
@@ -303,41 +398,51 @@ acpi_cpufreq_cpu_init (
struct cpufreq_acpi_io *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
struct acpi_processor_performance *perf;
dprintk("acpi_cpufreq_cpu_init\n");
if (!acpi_perf_data[cpu])
return (-ENODEV);
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
data->acpi_data = acpi_perf_data[cpu];
acpi_io_data[cpu] = data;
result = acpi_processor_register_performance(&data->acpi_data, cpu);
result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
goto err_free;
perf = data->acpi_data;
policy->cpus = perf->shared_cpu_map;
policy->shared_type = perf->shared_type;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
}
/* capability check */
if (data->acpi_data.state_count <= 1) {
if (perf->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
(data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
(perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
dprintk("Unsupported address space [%d, %d]\n",
(u32) (data->acpi_data.control_register.space_id),
(u32) (data->acpi_data.status_register.space_id));
(u32) (perf->control_register.space_id),
(u32) (perf->status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -345,9 +450,9 @@ acpi_cpufreq_cpu_init (
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i=0; i<data->acpi_data.state_count; i++) {
if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
for (i=0; i<perf->state_count; i++) {
if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
@@ -355,11 +460,11 @@ acpi_cpufreq_cpu_init (
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
/* table init */
for (i=0; i<=data->acpi_data.state_count; i++)
for (i=0; i<=perf->state_count; i++)
{
data->freq_table[i].index = i;
if (i<data->acpi_data.state_count)
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
if (i<perf->state_count)
data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
else
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
@@ -374,12 +479,12 @@ acpi_cpufreq_cpu_init (
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
for (i = 0; i < perf->state_count; i++)
dprintk(" %cP%d: %d MHz, %d mW, %d uS\n",
(i == data->acpi_data.state?'*':' '), i,
(u32) data->acpi_data.states[i].core_frequency,
(u32) data->acpi_data.states[i].power,
(u32) data->acpi_data.states[i].transition_latency);
(i == perf->state?'*':' '), i,
(u32) perf->states[i].core_frequency,
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
@@ -394,7 +499,7 @@ acpi_cpufreq_cpu_init (
err_freqfree:
kfree(data->freq_table);
err_unreg:
acpi_processor_unregister_performance(&data->acpi_data, cpu);
acpi_processor_unregister_performance(perf, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
@@ -415,7 +520,7 @@ acpi_cpufreq_cpu_exit (
if (data) {
cpufreq_frequency_table_put_attr(policy->cpu);
acpi_io_data[policy->cpu] = NULL;
acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
kfree(data);
}
@@ -462,7 +567,10 @@ acpi_cpufreq_init (void)
dprintk("acpi_cpufreq_init\n");
result = cpufreq_register_driver(&acpi_cpufreq_driver);
result = acpi_cpufreq_early_init_acpi();
if (!result)
result = cpufreq_register_driver(&acpi_cpufreq_driver);
return (result);
}
@@ -471,10 +579,15 @@ acpi_cpufreq_init (void)
static void __exit
acpi_cpufreq_exit (void)
{
unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
for_each_cpu(i) {
kfree(acpi_perf_data[i]);
acpi_perf_data[i] = NULL;
}
return;
}
+189 -71
@@ -347,7 +347,36 @@ static unsigned int get_cur_freq(unsigned int cpu)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
static struct acpi_processor_performance p;
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
/*
* centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
* library
*
* Before doing the actual init, we need to do _PSD related setup whenever
* supported by the BIOS. These are handled by this early_init routine.
*/
static int centrino_cpu_early_init_acpi(void)
{
unsigned int i, j;
struct acpi_processor_performance *data;
for_each_cpu(i) {
data = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
if (!data) {
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
return (-ENOMEM);
}
acpi_perf_data[i] = data;
}
acpi_processor_preregister_performance(acpi_perf_data);
return 0;
}
/*
* centrino_cpu_init_acpi - register with ACPI P-States library
@@ -361,46 +390,51 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
unsigned long cur_freq;
int result = 0, i;
unsigned int cpu = policy->cpu;
struct acpi_processor_performance *p;
p = acpi_perf_data[cpu];
/* register with ACPI core */
if (acpi_processor_register_performance(&p, cpu)) {
dprintk("obtaining ACPI data failed\n");
if (acpi_processor_register_performance(p, cpu)) {
dprintk(PFX "obtaining ACPI data failed\n");
return -EIO;
}
policy->cpus = p->shared_cpu_map;
policy->shared_type = p->shared_type;
/* verify the acpi_data */
if (p.state_count <= 1) {
if (p->state_count <= 1) {
dprintk("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
(p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
dprintk("Invalid control/status registers (%x - %x)\n",
p.control_register.space_id, p.status_register.space_id);
p->control_register.space_id, p->status_register.space_id);
result = -EIO;
goto err_unreg;
}
for (i=0; i<p.state_count; i++) {
if (p.states[i].control != p.states[i].status) {
for (i=0; i<p->state_count; i++) {
if (p->states[i].control != p->states[i].status) {
dprintk("Different control (%llu) and status values (%llu)\n",
p.states[i].control, p.states[i].status);
p->states[i].control, p->states[i].status);
result = -EINVAL;
goto err_unreg;
}
if (!p.states[i].core_frequency) {
if (!p->states[i].core_frequency) {
dprintk("Zero core frequency for state %u\n", i);
result = -EINVAL;
goto err_unreg;
}
if (p.states[i].core_frequency > p.states[0].core_frequency) {
if (p->states[i].core_frequency > p->states[0].core_frequency) {
dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
p.states[i].core_frequency, p.states[0].core_frequency);
p.states[i].core_frequency = 0;
p->states[i].core_frequency, p->states[0].core_frequency);
p->states[i].core_frequency = 0;
continue;
}
}
@@ -412,26 +446,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
centrino_model[cpu]->model_name=NULL;
centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
(p.state_count + 1), GFP_KERNEL);
(p->state_count + 1), GFP_KERNEL);
if (!centrino_model[cpu]->op_points) {
result = -ENOMEM;
goto err_kfree;
}
for (i=0; i<p.state_count; i++) {
centrino_model[cpu]->op_points[i].index = p.states[i].control;
centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
for (i=0; i<p->state_count; i++) {
centrino_model[cpu]->op_points[i].index = p->states[i].control;
centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
dprintk("adding state %i with frequency %u and control value %04x\n",
i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
}
centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
cur_freq = get_cur_freq(cpu);
for (i=0; i<p.state_count; i++) {
if (!p.states[i].core_frequency) {
for (i=0; i<p->state_count; i++) {
if (!p->states[i].core_frequency) {
dprintk("skipping state %u\n", i);
centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
continue;
@@ -447,7 +481,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
p.state = i;
p->state = i;
}
/* notify BIOS that we exist */
@@ -460,12 +494,13 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
err_kfree:
kfree(centrino_model[cpu]);
err_unreg:
acpi_processor_unregister_performance(&p, cpu);
dprintk("invalid ACPI data\n");
acpi_processor_unregister_performance(p, cpu);
dprintk(PFX "invalid ACPI data\n");
return (result);
}
#else
static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
static inline int centrino_cpu_early_init_acpi(void) { return 0; }
#endif
static int centrino_cpu_init(struct cpufreq_policy *policy)
@@ -551,10 +586,15 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
if (!centrino_model[cpu]->model_name) {
dprintk("unregistering and freeing ACPI data\n");
acpi_processor_unregister_performance(&p, cpu);
kfree(centrino_model[cpu]->op_points);
kfree(centrino_model[cpu]);
static struct acpi_processor_performance *p;
if (acpi_perf_data[cpu]) {
p = acpi_perf_data[cpu];
dprintk("unregistering and freeing ACPI data\n");
acpi_processor_unregister_performance(p, cpu);
kfree(centrino_model[cpu]->op_points);
kfree(centrino_model[cpu]);
}
}
#endif
@@ -588,63 +628,128 @@ static int centrino_target (struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int newstate = 0;
unsigned int msr, oldmsr, h, cpu = policy->cpu;
unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
struct cpufreq_freqs freqs;
cpumask_t online_policy_cpus;
cpumask_t saved_mask;
int retval;
cpumask_t set_mask;
cpumask_t covered_cpus;
int retval = 0;
unsigned int j, k, first_cpu, tmp;
if (centrino_model[cpu] == NULL)
if (unlikely(centrino_model[cpu] == NULL))
return -ENODEV;
/*
* Support for SMP systems.
* Make sure we are running on the CPU that wants to change frequency
*/
if (unlikely(cpufreq_frequency_table_target(policy,
centrino_model[cpu]->op_points,
target_freq,
relation,
&newstate))) {
return -EINVAL;
}
#ifdef CONFIG_HOTPLUG_CPU
/* cpufreq holds the hotplug lock, so we are safe from here on */
cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
online_policy_cpus = policy->cpus;
#endif
saved_mask = current->cpus_allowed;
set_cpus_allowed(current, policy->cpus);
if (!cpu_isset(smp_processor_id(), policy->cpus)) {
dprintk("couldn't limit to CPUs in this domain\n");
return(-EAGAIN);
first_cpu = 1;
cpus_clear(covered_cpus);
for_each_cpu_mask(j, online_policy_cpus) {
/*
* Support for SMP systems.
* Make sure we are running on CPU that wants to change freq
*/
cpus_clear(set_mask);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
cpus_or(set_mask, set_mask, online_policy_cpus);
else
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
if (first_cpu) {
/* We haven't started the transition yet. */
goto migrate_end;
}
break;
}
msr = centrino_model[cpu]->op_points[newstate].index;
if (first_cpu) {
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (msr == (oldmsr & 0xffff)) {
dprintk("no change needed - msr was and needs "
"to be %x\n", oldmsr);
retval = 0;
goto migrate_end;
}
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
for_each_cpu_mask(k, online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs,
CPUFREQ_PRECHANGE);
}
first_cpu = 0;
/* all but 16 LSB are reserved, treat them with care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
}
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
break;
cpu_set(j, covered_cpus);
}
if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
relation, &newstate)) {
retval = -EINVAL;
goto migrate_end;
for_each_cpu_mask(k, online_policy_cpus) {
freqs.cpu = k;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
msr = centrino_model[cpu]->op_points[newstate].index;
rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
if (unlikely(retval)) {
/*
* We have failed halfway through the frequency change.
* We have sent callbacks to policy->cpus and
* MSRs have already been written on covered_cpus.
* Best effort undo..
*/
if (msr == (oldmsr & 0xffff)) {
retval = 0;
dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
goto migrate_end;
if (!cpus_empty(covered_cpus)) {
for_each_cpu_mask(j, covered_cpus) {
set_cpus_allowed(current, cpumask_of_cpu(j));
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
}
}
tmp = freqs.new;
freqs.new = freqs.old;
freqs.old = tmp;
for_each_cpu_mask(j, online_policy_cpus) {
freqs.cpu = j;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
freqs.cpu = cpu;
freqs.old = extract_clock(oldmsr, cpu, 0);
freqs.new = extract_clock(msr, cpu, 0);
dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
target_freq, freqs.old, freqs.new, msr);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* all but 16 LSB are "reserved", so treat them with
care */
oldmsr &= ~0xffff;
msr &= 0xffff;
oldmsr |= msr;
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
retval = 0;
migrate_end:
set_cpus_allowed(current, saved_mask);
return (retval);
return 0;
}
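As a concrete illustration of the MSR masking performed above (register values invented for the example): if MSR_IA32_PERF_CTL currently reads 0x12340b26 and the frequency table supplies a control value of 0x0813, the code keeps the reserved upper half (0x12340b26 & ~0xffff = 0x12340000), masks the new value to 16 bits (0x0813) and writes back 0x12340813.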
static struct freq_attr* centrino_attr[] = {
@@ -686,12 +791,25 @@ static int __init centrino_init(void)
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
centrino_cpu_early_init_acpi();
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
unsigned int j;
#endif
cpufreq_unregister_driver(&centrino_driver);
#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
for_each_cpu(j) {
kfree(acpi_perf_data[j]);
acpi_perf_data[j] = NULL;
}
#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
+1
@@ -77,6 +77,7 @@ choice
config IA64_GENERIC
bool "generic"
select ACPI
select PCI
select NUMA
select ACPI_NUMA
help
+1 -1
@@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
ACPI_MEM_FREE(dev_info);
kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k
+2 -2
@@ -68,8 +68,6 @@ EXPORT_SYMBOL(pm_power_off);
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
static unsigned int __initdata acpi_madt_rev;
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
@@ -243,6 +241,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
static unsigned int __initdata acpi_madt_rev;
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)
+2
@@ -671,9 +671,11 @@ int add_memory(u64 start, u64 size)
return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif
+1
@@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
select PCI
select ACPI_NUMA
default y
help
+1
@@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += processor.o
processor-y := ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
endif
-72
@@ -1,72 +0,0 @@
/*
* arch/x86_64/kernel/acpi/processor.c
*
* Copyright (C) 2005 Intel Corporation
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* - Added _PDC for platforms with Intel CPUs
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <asm/acpi.h>
static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
{
struct acpi_object_list *obj_list;
union acpi_object *obj;
u32 *buf;
/* allocate and initialize pdc. It will be used later. */
obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
if (!obj_list) {
printk(KERN_ERR "Memory allocation error\n");
return;
}
obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!obj) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj_list);
return;
}
buf = kmalloc(12, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "Memory allocation error\n");
kfree(obj);
kfree(obj_list);
return;
}
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
obj->buffer.pointer = (u8 *) buf;
obj_list->count = 1;
obj_list->pointer = obj;
pr->pdc = obj_list;
return;
}
/* Initialize _PDC data based on the CPU vendor */
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
init_intel_pdc(pr, c);
return;
}
EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
+1 -2
@@ -10,9 +10,8 @@ menu "ACPI (Advanced Configuration and Power Interface) Support"
config ACPI
bool "ACPI Support"
depends on IA64 || X86
depends on PCI
select PM
select PCI
default y
---help---
Advanced Configuration and Power Interface (ACPI) support for
+5 -7
@@ -74,7 +74,7 @@ struct acpi_memory_device {
unsigned short caching; /* memory cache attribute */
unsigned short write_protect; /* memory read/write attribute */
u64 start_addr; /* Memory Range start physical addr */
u64 end_addr; /* Memory Range end physical addr */
u64 length; /* Memory Range length */
};
static int
@@ -97,12 +97,11 @@ acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
if (ACPI_SUCCESS(status)) {
if (address64.resource_type == ACPI_MEMORY_RANGE) {
/* Populate the structure */
mem_device->caching =
address64.info.mem.caching;
mem_device->caching = address64.info.mem.caching;
mem_device->write_protect =
address64.info.mem.write_protect;
mem_device->start_addr = address64.minimum;
mem_device->end_addr = address64.maximum;
mem_device->length = address64.address_length;
}
}
@@ -199,8 +198,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
*/
result = add_memory(mem_device->start_addr,
(mem_device->end_addr - mem_device->start_addr) + 1);
result = add_memory(mem_device->start_addr, mem_device->length);
if (result) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
mem_device->state = MEMORY_INVALID_STATE;
@@ -249,7 +247,7 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
int result;
u64 start = mem_device->start_addr;
u64 len = mem_device->end_addr - start + 1;
u64 len = mem_device->length;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
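A quick check on the substitution above: the old expression (end_addr - start_addr) + 1 is exactly the address_length reported by the _CRS descriptor that the patch now stores directly, so a range from 0x100000000 to 0x17fffffff gives 0x17fffffff - 0x100000000 + 1 = 0x80000000 (2 GiB) either way.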
+24 -8
@@ -817,7 +817,7 @@ typedef int (proc_writefunc) (struct file * file, const char __user * buffer,
unsigned long count, void *data);
static int
__init asus_proc_add(char *name, proc_writefunc * writefunc,
asus_proc_add(char *name, proc_writefunc * writefunc,
proc_readfunc * readfunc, mode_t mode,
struct acpi_device *device)
{
@@ -836,7 +836,7 @@ __init asus_proc_add(char *name, proc_writefunc * writefunc,
return 0;
}
static int __init asus_hotk_add_fs(struct acpi_device *device)
static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
mode_t mode;
@@ -954,7 +954,7 @@ static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
* This function is used to initialize the hotk with right values. In this
* method, we can make all the detection we want, and modify the hotk struct
*/
static int __init asus_hotk_get_info(void)
static int asus_hotk_get_info(void)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -970,7 +970,7 @@ static int __init asus_hotk_get_info(void)
* HID), this bit will be moved. A global variable asus_info contains
* the DSDT header.
*/
status = acpi_get_table(ACPI_TABLE_DSDT, 1, &dsdt);
status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
if (ACPI_FAILURE(status))
printk(KERN_WARNING " Couldn't get the DSDT table header\n");
else
@@ -1101,7 +1101,7 @@ static int __init asus_hotk_get_info(void)
return AE_OK;
}
static int __init asus_hotk_check(void)
static int asus_hotk_check(void)
{
int result = 0;
@@ -1119,7 +1119,9 @@ static int __init asus_hotk_check(void)
return result;
}
static int __init asus_hotk_add(struct acpi_device *device)
static int asus_hotk_found;
static int asus_hotk_add(struct acpi_device *device)
{
acpi_status status = AE_OK;
int result;
@@ -1180,6 +1182,8 @@ static int __init asus_hotk_add(struct acpi_device *device)
}
}
asus_hotk_found = 1;
end:
if (result) {
kfree(hotk);
@@ -1226,12 +1230,24 @@ static int __init asus_acpi_init(void)
asus_proc_dir->owner = THIS_MODULE;
result = acpi_bus_register_driver(&asus_hotk_driver);
if (result < 1) {
acpi_bus_unregister_driver(&asus_hotk_driver);
if (result < 0) {
remove_proc_entry(PROC_ASUS, acpi_root_dir);
return -ENODEV;
}
/*
* This is a bit of a kludge. We only want this module loaded
* for ASUS systems, but there's currently no way to probe the
* ACPI namespace for ASUS HIDs. So we just return failure if
* we didn't find one, which will cause the module to be
* unloaded.
*/
if (!asus_hotk_found) {
acpi_bus_unregister_driver(&asus_hotk_driver);
remove_proc_entry(PROC_ASUS, acpi_root_dir);
return result;
}
return 0;
}
+12 -10
@@ -43,7 +43,7 @@ ACPI_MODULE_NAME("acpi_bus")
extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
#endif
FADT_DESCRIPTOR acpi_fadt;
struct fadt_descriptor acpi_fadt;
EXPORT_SYMBOL(acpi_fadt);
struct acpi_device *acpi_root;
@@ -205,12 +205,14 @@ int acpi_bus_set_power(acpi_handle handle, int state)
* Get device's current power state if it's unknown
* This means device power state isn't initialized or previous setting failed
*/
if (device->power.state == ACPI_STATE_UNKNOWN)
acpi_bus_get_power(device->handle, &device->power.state);
if (state == device->power.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
state));
return_VALUE(0);
if (!device->flags.force_power_state) {
if (device->power.state == ACPI_STATE_UNKNOWN)
acpi_bus_get_power(device->handle, &device->power.state);
if (state == device->power.state) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
state));
return_VALUE(0);
}
}
if (!device->power.states[state].flags.valid) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Device does not support D%d\n",
@@ -596,6 +598,8 @@ void __init acpi_early_init(void)
if (acpi_disabled)
return_VOID;
printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
@@ -617,7 +621,7 @@ void __init acpi_early_init(void)
/*
* Get a separate copy of the FADT for use by other drivers.
*/
status = acpi_get_table(ACPI_TABLE_FADT, 1, &buffer);
status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to get the FADT\n");
goto error0;
@@ -743,8 +747,6 @@ static int __init acpi_init(void)
ACPI_FUNCTION_TRACE("acpi_init");
printk(KERN_INFO PREFIX "Subsystem revision %08x\n", ACPI_CA_VERSION);
if (acpi_disabled) {
printk(KERN_INFO PREFIX "Interpreter disabled.\n");
return_VALUE(-ENODEV);
+7 -6
@@ -87,7 +87,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
union acpi_operand_object *second_desc = NULL;
u32 flags;
ACPI_FUNCTION_TRACE("ds_create_buffer_field");
ACPI_FUNCTION_TRACE(ds_create_buffer_field);
/* Get the name_string argument */
@@ -210,7 +210,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
acpi_status status;
acpi_integer position;
ACPI_FUNCTION_TRACE_PTR("ds_get_field_names", info);
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
/* First field starts at bit zero */
@@ -342,7 +342,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
ACPI_FUNCTION_TRACE_PTR("ds_create_field", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@@ -399,7 +399,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_namespace_node *node;
u8 type = 0;
ACPI_FUNCTION_TRACE_PTR("ds_init_field_objects", op);
ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
switch (walk_state->opcode) {
case AML_FIELD_OP:
@@ -425,6 +425,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
* Walk the list of entries in the field_list
*/
while (arg) {
/* Ignore OFFSET and ACCESSAS terms here */
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
@@ -481,7 +482,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
ACPI_FUNCTION_TRACE_PTR("ds_create_bank_field", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_bank_field, op);
/* First arg is the name of the parent op_region (must already exist) */
@@ -554,7 +555,7 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
union acpi_parse_object *arg;
struct acpi_create_field_info info;
ACPI_FUNCTION_TRACE_PTR("ds_create_index_field", op);
ACPI_FUNCTION_TRACE_PTR(ds_create_index_field, op);
/* First arg is the name of the Index register (must already exist) */
+3 -3
@@ -184,7 +184,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
*
* RETURN: Status
*
* DESCRIPTION: Walk the namespace starting at "start_node" and perform any
* DESCRIPTION: Walk the namespace starting at "StartNode" and perform any
* necessary initialization on the objects found therein
*
******************************************************************************/
@@ -196,7 +196,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
acpi_status status;
struct acpi_init_walk_info info;
ACPI_FUNCTION_TRACE("ds_initialize_objects");
ACPI_FUNCTION_TRACE(ds_initialize_objects);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Starting initialization of namespace objects ****\n"));
@@ -213,7 +213,7 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
status = acpi_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
acpi_ds_init_one_object, &info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During walk_namespace"));
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+142 -90
@@ -81,6 +81,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
/* Invoke the global exception handler */
if (acpi_gbl_exception_handler) {
/* Exit the interpreter, allow handler to execute methods */
acpi_ex_exit_interpreter();
@@ -100,6 +101,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
}
#ifdef ACPI_DISASSEMBLER
if (ACPI_FAILURE(status)) {
/* Display method locals/args if disassembler is present */
acpi_dm_dump_method_info(status, walk_state, walk_state->op);
@@ -132,7 +134,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
{
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR("ds_begin_method_execution", method_node);
ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);
if (!method_node) {
return_ACPI_STATUS(AE_NULL_ENTRY);
@@ -168,11 +170,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
/*
* Get a unit from the method semaphore. This releases the
* interpreter if we block
* interpreter if we block (then reacquires it)
*/
status =
acpi_ex_system_wait_semaphore(obj_desc->method.semaphore,
ACPI_WAIT_FOREVER);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/*
@@ -183,7 +188,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
if (!obj_desc->method.owner_id) {
status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
goto cleanup;
}
}
@@ -193,6 +198,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node,
*/
obj_desc->method.thread_count++;
return_ACPI_STATUS(status);
cleanup:
/* On error, must signal the method semaphore if present */
if (obj_desc->method.semaphore) {
(void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1);
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
@@ -218,10 +231,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_namespace_node *method_node;
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_parameter_info info;
struct acpi_evaluate_info *info;
u32 i;
ACPI_FUNCTION_TRACE_PTR("ds_call_control_method", this_walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Execute method %p, currentstate=%p\n",
@@ -240,25 +253,31 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT);
}
/* Init for new method, wait on concurrency semaphore */
/* Init for new method, possibly wait on concurrency semaphore */
status = acpi_ds_begin_method_execution(method_node, obj_desc,
this_walk_state->method_node);
if (ACPI_FAILURE(status)) {
goto cleanup;
return_ACPI_STATUS(status);
}
/*
* 1) Parse the method. All "normal" methods are parsed for each execution.
* Internal methods (_OSI, etc.) do not require parsing.
*/
if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
/* 1) Parse: Create a new walk state for the preempting walk */
/* Create a new walk state for the parse */
next_walk_state =
acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
obj_desc, NULL);
if (!next_walk_state) {
return_ACPI_STATUS(AE_NO_MEMORY);
status = AE_NO_MEMORY;
goto cleanup;
}
/* Create and init a Root Node */
/* Create and init a parse tree root */
op = acpi_ps_create_scope_op();
if (!op) {
@@ -271,17 +290,20 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
obj_desc->method.aml_length,
NULL, 1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(next_walk_state);
acpi_ps_delete_parse_tree(op);
goto cleanup;
}
/* Begin AML parse */
/* Begin AML parse (deletes next_walk_state) */
status = acpi_ps_parse_aml(next_walk_state);
acpi_ps_delete_parse_tree(op);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
}
/* 2) Execute: Create a new state for the preempting walk */
/* 2) Begin method execution. Create a new walk state */
next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
NULL, obj_desc, thread);
@@ -289,6 +311,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
status = AE_NO_MEMORY;
goto cleanup;
}
/*
* The resolved arguments were put on the previous walk state's operand
* stack. Operands on the previous walk state stack always
@@ -296,12 +319,24 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
*/
this_walk_state->operands[this_walk_state->num_operands] = NULL;
info.parameters = &this_walk_state->operands[0];
info.parameter_type = ACPI_PARAM_ARGS;
/*
* Allocate and initialize the evaluation information block
* TBD: this is somewhat inefficient, should change interface to
* ds_init_aml_walk. For now, keeps this struct off the CPU stack
*/
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
info->parameters = &this_walk_state->operands[0];
info->parameter_type = ACPI_PARAM_ARGS;
status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
obj_desc->method.aml_start,
obj_desc->method.aml_length, &info, 3);
obj_desc->method.aml_length, info, 3);
ACPI_FREE(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
@@ -323,6 +358,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
"Starting nested execution, newstate=%p\n",
next_walk_state));
/* Invoke an internal method if necessary */
if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
status = obj_desc->method.implementation(next_walk_state);
}
@@ -330,16 +367,14 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(status);
cleanup:
/* Decrement the thread count on the method parse tree */
if (next_walk_state && (next_walk_state->method_desc)) {
next_walk_state->method_desc->method.thread_count--;
/* On error, we must terminate the method properly */
acpi_ds_terminate_control_method(obj_desc, next_walk_state);
if (next_walk_state) {
acpi_ds_delete_walk_state(next_walk_state);
}
/* On error, we must delete the new walk state */
acpi_ds_terminate_control_method(next_walk_state);
acpi_ds_delete_walk_state(next_walk_state);
return_ACPI_STATUS(status);
}
@@ -362,25 +397,33 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
union acpi_operand_object *return_desc)
{
acpi_status status;
int same_as_implicit_return;
ACPI_FUNCTION_TRACE_PTR("ds_restart_control_method", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"****Restart [%4.4s] Op %p return_value_from_callee %p\n",
"****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
(char *)&walk_state->method_node->name,
walk_state->method_call_op, return_desc));
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
" return_from_this_method_used?=%X res_stack %p Walk %p\n",
" ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
walk_state->return_used,
walk_state->results, walk_state));
/* Did the called method return a value? */
if (return_desc) {
/* Is the implicit return object the same as the return desc? */
same_as_implicit_return =
(walk_state->implicit_return_obj == return_desc);
/* Are we actually going to use the return value? */
if (walk_state->return_used) {
/* Save the return value from the previous method */
status = acpi_ds_result_push(return_desc, walk_state);
@@ -397,18 +440,23 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
}
/*
* The following code is the
* optional support for a so-called "implicit return". Some AML code
* assumes that the last value of the method is "implicitly" returned
* to the caller. Just save the last result as the return value.
* The following code is the optional support for the so-called
* "implicit return". Some AML code assumes that the last value of the
* method is "implicitly" returned to the caller, in the absence of an
* explicit return value.
*
* Just save the last result of the method as the return value.
*
* NOTE: this is optional because the ASL language does not actually
* support this behavior.
*/
else if (!acpi_ds_do_implicit_return
(return_desc, walk_state, FALSE)) {
(return_desc, walk_state, FALSE)
|| same_as_implicit_return) {
/*
* Delete the return value if it will not be used by the
* calling method
* calling method or remove one reference if the explicit return
* is the same as the implicit return value.
*/
acpi_ut_remove_reference(return_desc);
}
@@ -421,7 +469,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
* FUNCTION: acpi_ds_terminate_control_method
*
* PARAMETERS: walk_state - State of the method
* PARAMETERS: method_desc - Method object
* walk_state - State associated with the method
*
* RETURN: None
*
@@ -431,95 +480,100 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
*
******************************************************************************/
void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
struct acpi_walk_state *walk_state)
{
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *method_node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR("ds_terminate_control_method", walk_state);
ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
if (!walk_state) {
/* method_desc is required, walk_state is optional */
if (!method_desc) {
return_VOID;
}
/* The current method object was saved in the walk state */
if (walk_state) {
obj_desc = walk_state->method_desc;
if (!obj_desc) {
return_VOID;
/* Delete all arguments and locals */
acpi_ds_method_data_delete_all(walk_state);
}
/* Delete all arguments and locals */
acpi_ds_method_data_delete_all(walk_state);
/*
* Lock the parser while we terminate this method.
* If this is the last thread executing the method,
* we have additional cleanup to perform
*/
status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER);
status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
if (ACPI_FAILURE(status)) {
return_VOID;
}
/* Signal completion of the execution of this method if necessary */
if (walk_state->method_desc->method.semaphore) {
if (method_desc->method.semaphore) {
status =
acpi_os_signal_semaphore(walk_state->method_desc->method.
semaphore, 1);
acpi_os_signal_semaphore(method_desc->method.semaphore, 1);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO,
"Could not signal method semaphore"));
/* Ignore error and continue cleanup */
/* Ignore error and continue */
ACPI_EXCEPTION((AE_INFO, status,
"Could not signal method semaphore"));
}
}
/*
* There are no more threads executing this method. Perform
* additional cleanup.
*
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
if (walk_state) {
/*
* Delete any objects created by this method during execution.
* The method Node is stored in the walk state
*/
method_node = walk_state->method_node;
/* Lock namespace for possible update */
/* Lock namespace for possible update */
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
goto exit;
}
/*
* Delete any namespace entries created immediately underneath
* the method
*/
if (method_node && method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/*
* Delete any namespace entries created anywhere else within
* the namespace by the execution of this method
*/
acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
}
/*
* Delete any namespace entries created immediately underneath
* the method
*/
if (method_node->child) {
acpi_ns_delete_namespace_subtree(method_node);
}
/* Decrement the thread count on the method */
/*
* Delete any namespace entries created anywhere else within
* the namespace by the execution of this method
*/
acpi_ns_delete_namespace_by_owner(walk_state->method_desc->method.
owner_id);
status = acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
if (method_desc->method.thread_count) {
method_desc->method.thread_count--;
} else {
ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
}
/* Are there any other threads currently executing this method? */
if (walk_state->method_desc->method.thread_count) {
if (method_desc->method.thread_count) {
/*
* Additional threads. Do not release the owner_id in this case,
* we immediately reuse it for the next thread executing this method
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"*** Completed execution of one thread, %d threads remaining\n",
walk_state->method_desc->method.
thread_count));
method_desc->method.thread_count));
} else {
/* This is the only executing thread for this method */
@@ -533,22 +587,20 @@ void acpi_ds_terminate_control_method(struct acpi_walk_state *walk_state)
* This code is here because we must wait until the last thread exits
* before creating the synchronization semaphore.
*/
if ((walk_state->method_desc->method.concurrency == 1) &&
(!walk_state->method_desc->method.semaphore)) {
if ((method_desc->method.concurrency == 1) &&
(!method_desc->method.semaphore)) {
status = acpi_os_create_semaphore(1, 1,
&walk_state->
method_desc->method.
&method_desc->method.
semaphore);
}
/* No more threads, we can free the owner_id */
acpi_ut_release_owner_id(&walk_state->method_desc->method.
owner_id);
acpi_ut_release_owner_id(&method_desc->method.owner_id);
}
exit:
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER);
(void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
return_VOID;
}
@@ -581,7 +633,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE_PTR("ds_parse_method", node);
ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node);
/* Parameter Validation */
@@ -590,7 +642,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** Parsing [%4.4s] **** named_obj=%p\n",
"**** Parsing [%4.4s] **** NamedObj=%p\n",
acpi_ut_get_node_name(node), node));
/* Extract the method object from the method Node */
@@ -669,7 +721,7 @@ acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node)
}
ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
"**** [%4.4s] Parsed **** named_obj=%p Op=%p\n",
"**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n",
acpi_ut_get_node_name(node), node, op));
/*
+22 -21
@@ -81,7 +81,7 @@ acpi_ds_method_data_get_type(u16 opcode,
* special data types.
*
* NOTES: walk_state fields are initialized to zero by the
* ACPI_MEM_CALLOCATE().
* ACPI_ALLOCATE_ZEROED().
*
* A pseudo-Namespace Node is assigned to each argument and local
* so that ref_of() can return a pointer to the Node.
@@ -92,7 +92,7 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
{
u32 i;
ACPI_FUNCTION_TRACE("ds_method_data_init");
ACPI_FUNCTION_TRACE(ds_method_data_init);
/* Init the method arguments */
@@ -100,10 +100,10 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
ACPI_MOVE_32_TO_32(&walk_state->arguments[i].name,
NAMEOF_ARG_NTE);
walk_state->arguments[i].name.integer |= (i << 24);
walk_state->arguments[i].descriptor = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].descriptor_type = ACPI_DESC_TYPE_NAMED;
walk_state->arguments[i].type = ACPI_TYPE_ANY;
walk_state->arguments[i].flags = ANOBJ_END_OF_PEER_LIST |
ANOBJ_METHOD_ARG;
walk_state->arguments[i].flags =
ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_ARG;
}
/* Init the method locals */
@@ -113,11 +113,11 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state)
NAMEOF_LOCAL_NTE);
walk_state->local_variables[i].name.integer |= (i << 24);
walk_state->local_variables[i].descriptor =
walk_state->local_variables[i].descriptor_type =
ACPI_DESC_TYPE_NAMED;
walk_state->local_variables[i].type = ACPI_TYPE_ANY;
walk_state->local_variables[i].flags = ANOBJ_END_OF_PEER_LIST |
ANOBJ_METHOD_LOCAL;
walk_state->local_variables[i].flags =
ANOBJ_END_OF_PEER_LIST | ANOBJ_METHOD_LOCAL;
}
return_VOID;
@@ -140,7 +140,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
{
u32 index;
ACPI_FUNCTION_TRACE("ds_method_data_delete_all");
ACPI_FUNCTION_TRACE(ds_method_data_delete_all);
/* Detach the locals */
@@ -199,7 +199,7 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
acpi_status status;
u32 index = 0;
ACPI_FUNCTION_TRACE_PTR("ds_method_data_init_args", params);
ACPI_FUNCTION_TRACE_PTR(ds_method_data_init_args, params);
if (!params) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -251,7 +251,7 @@ acpi_ds_method_data_get_node(u16 opcode,
struct acpi_walk_state *walk_state,
struct acpi_namespace_node **node)
{
ACPI_FUNCTION_TRACE("ds_method_data_get_node");
ACPI_FUNCTION_TRACE(ds_method_data_get_node);
/*
* Method Locals and Arguments are supported
@@ -318,10 +318,10 @@ acpi_ds_method_data_set_value(u16 opcode,
acpi_status status;
struct acpi_namespace_node *node;
ACPI_FUNCTION_TRACE("ds_method_data_set_value");
ACPI_FUNCTION_TRACE(ds_method_data_set_value);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"new_obj %p Opcode %X, Refs=%d [%s]\n", object,
"NewObj %p Opcode %X, Refs=%d [%s]\n", object,
opcode, object->common.reference_count,
acpi_ut_get_type_name(object->common.type)));
@@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* Increment ref count so object can't be deleted while installed.
* NOTE: We do not copy the object in order to preserve the call by
* reference semantics of ACPI Control Method invocation.
* (See ACPI specification 2.0_c)
* (See ACPI Specification 2.0_c)
*/
acpi_ut_add_reference(object);
@@ -351,7 +351,7 @@ acpi_ds_method_data_set_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_get_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
* Index - which local_var or argument to get
* Index - Which local_var or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
*
@@ -372,7 +372,7 @@ acpi_ds_method_data_get_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
ACPI_FUNCTION_TRACE("ds_method_data_get_value");
ACPI_FUNCTION_TRACE(ds_method_data_get_value);
/* Validate the object descriptor */
@@ -459,7 +459,7 @@ acpi_ds_method_data_get_value(u16 opcode,
* FUNCTION: acpi_ds_method_data_delete_value
*
* PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
* Index - which local_var or argument to delete
* Index - Which local_var or argument to delete
* walk_state - Current walk state object
*
* RETURN: None
@@ -477,7 +477,7 @@ acpi_ds_method_data_delete_value(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
ACPI_FUNCTION_TRACE("ds_method_data_delete_value");
ACPI_FUNCTION_TRACE(ds_method_data_delete_value);
/* Get the namespace node for the arg/local */
@@ -538,7 +538,7 @@ acpi_ds_store_object_to_local(u16 opcode,
union acpi_operand_object *current_obj_desc;
union acpi_operand_object *new_obj_desc;
ACPI_FUNCTION_TRACE("ds_store_object_to_local");
ACPI_FUNCTION_TRACE(ds_store_object_to_local);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Opcode=%X Index=%d Obj=%p\n",
opcode, index, obj_desc));
@@ -614,7 +614,7 @@ acpi_ds_store_object_to_local(u16 opcode,
&& (current_obj_desc->reference.opcode ==
AML_REF_OF_OP)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Arg (%p) is an obj_ref(Node), storing in node %p\n",
"Arg (%p) is an ObjRef(Node), storing in node %p\n",
new_obj_desc,
current_obj_desc));
@@ -688,7 +688,7 @@ acpi_ds_method_data_get_type(u16 opcode,
struct acpi_namespace_node *node;
union acpi_operand_object *object;
ACPI_FUNCTION_TRACE("ds_method_data_get_type");
ACPI_FUNCTION_TRACE(ds_method_data_get_type);
/* Get the namespace node for the arg/local */
@@ -701,6 +701,7 @@ acpi_ds_method_data_get_type(u16 opcode,
object = acpi_ns_get_attached_object(node);
if (!object) {
/* Uninitialized local/arg, return TYPE_ANY */
return_VALUE(ACPI_TYPE_ANY);

Some files were not shown because too many files have changed in this diff.