mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
Merge tag 'pm-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael Wysocki:
"These rework the collection of cpufreq statistics to allow it to take
place if fast frequency switching is enabled in the governor, rework
the frequency invariance handling in the cpufreq core and drivers, add
new hardware support to a couple of cpufreq drivers, fix a number of
assorted issues and clean up the code all over.
Specifics:
- Rework cpufreq statistics collection to allow it to take place when
fast frequency switching is enabled in the governor (Viresh Kumar).
- Make the cpufreq core set the frequency scale on behalf of the
driver and update several cpufreq drivers accordingly (Ionela
Voinescu, Valentin Schneider).
- Add new hardware support to the STI and qcom cpufreq drivers and
improve them (Alain Volmat, Manivannan Sadhasivam).
- Fix multiple assorted issues in cpufreq drivers (Jon Hunter,
Krzysztof Kozlowski, Matthias Kaehlcke, Pali Rohár, Stephan
Gerhold, Viresh Kumar).
- Fix several assorted issues in the operating performance points
(OPP) framework (Stephan Gerhold, Viresh Kumar).
- Allow devfreq drivers to fetch devfreq instances by DT enumeration
instead of using explicit phandles and modify the devfreq core code
to support driver-specific devfreq DT bindings (Leonard Crestez,
Chanwoo Choi).
- Improve initial hardware resetting in the tegra30 devfreq driver
and clean up the tegra cpuidle driver (Dmitry Osipenko).
- Update the cpuidle core to collect state entry rejection statistics
and expose them via sysfs (Lina Iyer).
- Improve the ACPI _CST code handling diagnostics (Chen Yu).
- Update the PSCI cpuidle driver to allow the PM domain
initialization to occur in the OSI mode as well as in the PC mode
(Ulf Hansson).
- Rework the generic power domains (genpd) core code to allow domain
power off transition to be aborted in the absence of the "power
off" domain callback (Ulf Hansson).
- Fix two suspend-to-idle issues in the ACPI EC driver (Rafael
Wysocki).
- Fix the handling of timer_expires in the PM-runtime framework on
32-bit systems and the handling of device links in it (Grygorii
Strashko, Xiang Chen).
- Add IO requests batching support to the hibernate image saving and
reading code and drop a bogus get_gendisk() from there (Xiaoyi
Chen, Christoph Hellwig).
- Allow PCIe ports to be put into the D3cold power state if they are
power-manageable via ACPI (Lukas Wunner).
- Add missing header file include to a power capping driver (Pujin
Shi).
- Clean up the qcom-cpr AVS driver a bit (Liu Shixin).
- Kevin Hilman steps down as designated reviewer of adaptive voltage
scaling (AVS) drivers (Kevin Hilman)"
* tag 'pm-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (65 commits)
cpufreq: stats: Fix string format specifier mismatch
arm: disable frequency invariance for CONFIG_BL_SWITCHER
cpufreq,arm,arm64: restructure definitions of arch_set_freq_scale()
cpufreq: stats: Add memory barrier to store_reset()
cpufreq: schedutil: Simplify sugov_fast_switch()
ACPI: EC: PM: Drop ec_no_wakeup check from acpi_ec_dispatch_gpe()
ACPI: EC: PM: Flush EC work unconditionally after wakeup
PCI/ACPI: Whitelist hotplug ports for D3 if power managed by ACPI
PM: hibernate: remove the bogus call to get_gendisk() in software_resume()
cpufreq: Move traces and update to policy->cur to cpufreq core
cpufreq: stats: Enable stats for fast-switch as well
cpufreq: stats: Mark few conditionals with unlikely()
cpufreq: stats: Remove locking
cpufreq: stats: Defer stats update to cpufreq_stats_record_transition()
PM: domains: Allow to abort power off when no ->power_off() callback
PM: domains: Rename power state enums for genpd
PM / devfreq: tegra30: Improve initial hardware resetting
PM / devfreq: event: Change prototype of devfreq_event_get_edev_by_phandle function
PM / devfreq: Change prototype of devfreq_get_devfreq_by_phandle function
PM / devfreq: Add devfreq_get_devfreq_by_node function
...
This commit is contained in:
@@ -528,6 +528,10 @@ object corresponding to it, as follows:
|
||||
Total number of times the hardware has been asked by the given CPU to
|
||||
enter this idle state.
|
||||
|
||||
``rejected``
|
||||
Total number of times a request to enter this idle state on the given
|
||||
CPU was rejected.
|
||||
|
||||
The :file:`desc` and :file:`name` files both contain strings. The difference
|
||||
between them is that the name is expected to be more concise, while the
|
||||
description may be longer and it may contain white space or special characters.
|
||||
@@ -572,6 +576,11 @@ particular case. For these reasons, the only reliable way to find out how
|
||||
much time has been spent by the hardware in different idle states supported by
|
||||
it is to use idle state residency counters in the hardware, if available.
|
||||
|
||||
Generally, an interrupt received when trying to enter an idle state causes the
|
||||
idle state entry request to be rejected, in which case the ``CPUIdle`` driver
|
||||
may return an error code to indicate that this was the case. The :file:`usage`
|
||||
and :file:`rejected` files report the number of times the given idle state
|
||||
was entered successfully or rejected, respectively.
|
||||
|
||||
.. _cpu-pm-qos:
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ Properties:
|
||||
- compatible
|
||||
Usage: required
|
||||
Value type: <string>
|
||||
Definition: must be "qcom,cpufreq-hw".
|
||||
Definition: must be "qcom,cpufreq-hw" or "qcom,cpufreq-epss".
|
||||
|
||||
- clocks
|
||||
Usage: required
|
||||
|
||||
@@ -154,25 +154,27 @@ Optional properties:
|
||||
- opp-suspend: Marks the OPP to be used during device suspend. If multiple OPPs
|
||||
in the table have this, the OPP with highest opp-hz will be used.
|
||||
|
||||
- opp-supported-hw: This enables us to select only a subset of OPPs from the
|
||||
larger OPP table, based on what version of the hardware we are running on. We
|
||||
still can't have multiple nodes with the same opp-hz value in OPP table.
|
||||
- opp-supported-hw: This property allows a platform to enable only a subset of
|
||||
the OPPs from the larger set present in the OPP table, based on the current
|
||||
version of the hardware (already known to the operating system).
|
||||
|
||||
It's a user defined array containing a hierarchy of hardware version numbers,
|
||||
supported by the OPP. For example: a platform with hierarchy of three levels
|
||||
of versions (A, B and C), this field should be like <X Y Z>, where X
|
||||
corresponds to Version hierarchy A, Y corresponds to version hierarchy B and Z
|
||||
corresponds to version hierarchy C.
|
||||
Each block present in the array of blocks in this property, represents a
|
||||
sub-group of hardware versions supported by the OPP. i.e. <sub-group A>,
|
||||
<sub-group B>, etc. The OPP will be enabled if _any_ of these sub-groups match
|
||||
the hardware's version.
|
||||
|
||||
Each level of hierarchy is represented by a 32 bit value, and so there can be
|
||||
only 32 different supported version per hierarchy. i.e. 1 bit per version. A
|
||||
value of 0xFFFFFFFF will enable the OPP for all versions for that hierarchy
|
||||
level. And a value of 0x00000000 will disable the OPP completely, and so we
|
||||
never want that to happen.
|
||||
Each sub-group is a platform defined array representing the hierarchy of
|
||||
hardware versions supported by the platform. For a platform with three
|
||||
hierarchical levels of version (X.Y.Z), this field shall look like
|
||||
|
||||
If 32 values aren't sufficient for a version hierarchy, than that version
|
||||
hierarchy can be contained in multiple 32 bit values. i.e. <X Y Z1 Z2> in the
|
||||
above example, Z1 & Z2 refer to the version hierarchy Z.
|
||||
opp-supported-hw = <X1 Y1 Z1>, <X2 Y2 Z2>, <X3 Y3 Z3>.
|
||||
|
||||
Each level (eg. X1) in version hierarchy is represented by a 32 bit value, one
|
||||
bit per version and so there can be maximum 32 versions per level. Logical AND
|
||||
(&) operation is performed for each level with the hardware's level version
|
||||
and a non-zero output for _all_ the levels in a sub-group means the OPP is
|
||||
supported by hardware. A value of 0xFFFFFFFF for each level in the sub-group
|
||||
will enable the OPP for all versions for the hardware.
|
||||
|
||||
- status: Marks the node enabled/disabled.
|
||||
|
||||
@@ -503,7 +505,6 @@ Example 5: opp-supported-hw
|
||||
*/
|
||||
opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>
|
||||
opp-hz = /bits/ 64 <600000000>;
|
||||
opp-microvolt = <915000 900000 925000>;
|
||||
...
|
||||
};
|
||||
|
||||
@@ -516,7 +517,17 @@ Example 5: opp-supported-hw
|
||||
*/
|
||||
opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>
|
||||
opp-hz = /bits/ 64 <800000000>;
|
||||
opp-microvolt = <915000 900000 925000>;
|
||||
...
|
||||
};
|
||||
|
||||
opp-900000000 {
|
||||
/*
|
||||
* Supports:
|
||||
* - All cuts and substrate where process version is 0x2.
|
||||
* - All cuts and process where substrate version is 0x2.
|
||||
*/
|
||||
opp-supported-hw = <0xFFFFFFFF 0xFFFFFFFF 0x02>, <0xFFFFFFFF 0x01 0xFFFFFFFF>
|
||||
opp-hz = /bits/ 64 <900000000>;
|
||||
...
|
||||
};
|
||||
};
|
||||
|
||||
@@ -5388,7 +5388,6 @@ F: include/linux/kobj*
|
||||
F: lib/kobj*
|
||||
|
||||
DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
|
||||
M: Kevin Hilman <khilman@kernel.org>
|
||||
M: Nishanth Menon <nm@ti.com>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Maintained
|
||||
|
||||
@@ -26,14 +26,6 @@
|
||||
opp-microvolt = <800000 800000 1125000>;
|
||||
};
|
||||
|
||||
opp@456000000,800,2,2 {
|
||||
opp-microvolt = <800000 800000 1125000>;
|
||||
};
|
||||
|
||||
opp@456000000,800,3,2 {
|
||||
opp-microvolt = <800000 800000 1125000>;
|
||||
};
|
||||
|
||||
opp@456000000,825 {
|
||||
opp-microvolt = <825000 825000 1125000>;
|
||||
};
|
||||
@@ -46,10 +38,6 @@
|
||||
opp-microvolt = <800000 800000 1125000>;
|
||||
};
|
||||
|
||||
opp@608000000,800,3,2 {
|
||||
opp-microvolt = <800000 800000 1125000>;
|
||||
};
|
||||
|
||||
opp@608000000,825 {
|
||||
opp-microvolt = <825000 825000 1125000>;
|
||||
};
|
||||
@@ -78,18 +66,6 @@
|
||||
opp-microvolt = <875000 875000 1125000>;
|
||||
};
|
||||
|
||||
opp@760000000,875,1,1 {
|
||||
opp-microvolt = <875000 875000 1125000>;
|
||||
};
|
||||
|
||||
opp@760000000,875,0,2 {
|
||||
opp-microvolt = <875000 875000 1125000>;
|
||||
};
|
||||
|
||||
opp@760000000,875,1,2 {
|
||||
opp-microvolt = <875000 875000 1125000>;
|
||||
};
|
||||
|
||||
opp@760000000,900 {
|
||||
opp-microvolt = <900000 900000 1125000>;
|
||||
};
|
||||
@@ -134,14 +110,6 @@
|
||||
opp-microvolt = <950000 950000 1125000>;
|
||||
};
|
||||
|
||||
opp@912000000,950,0,2 {
|
||||
opp-microvolt = <950000 950000 1125000>;
|
||||
};
|
||||
|
||||
opp@912000000,950,2,2 {
|
||||
opp-microvolt = <950000 950000 1125000>;
|
||||
};
|
||||
|
||||
opp@912000000,1000 {
|
||||
opp-microvolt = <1000000 1000000 1125000>;
|
||||
};
|
||||
@@ -170,10 +138,6 @@
|
||||
opp-microvolt = <1000000 1000000 1125000>;
|
||||
};
|
||||
|
||||
opp@1000000000,1000,0,2 {
|
||||
opp-microvolt = <1000000 1000000 1125000>;
|
||||
};
|
||||
|
||||
opp@1000000000,1025 {
|
||||
opp-microvolt = <1025000 1025000 1125000>;
|
||||
};
|
||||
|
||||
@@ -37,19 +37,8 @@
|
||||
|
||||
opp@456000000,800 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x03 0x0006>;
|
||||
opp-hz = /bits/ 64 <456000000>;
|
||||
};
|
||||
|
||||
opp@456000000,800,2,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x04 0x0004>;
|
||||
opp-hz = /bits/ 64 <456000000>;
|
||||
};
|
||||
|
||||
opp@456000000,800,3,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x08 0x0004>;
|
||||
opp-supported-hw = <0x03 0x0006>, <0x04 0x0004>,
|
||||
<0x08 0x0004>;
|
||||
opp-hz = /bits/ 64 <456000000>;
|
||||
};
|
||||
|
||||
@@ -67,13 +56,7 @@
|
||||
|
||||
opp@608000000,800 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x04 0x0006>;
|
||||
opp-hz = /bits/ 64 <608000000>;
|
||||
};
|
||||
|
||||
opp@608000000,800,3,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x08 0x0004>;
|
||||
opp-supported-hw = <0x04 0x0006>, <0x08 0x0004>;
|
||||
opp-hz = /bits/ 64 <608000000>;
|
||||
};
|
||||
|
||||
@@ -115,25 +98,8 @@
|
||||
|
||||
opp@760000000,875 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x04 0x0001>;
|
||||
opp-hz = /bits/ 64 <760000000>;
|
||||
};
|
||||
|
||||
opp@760000000,875,1,1 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x02 0x0002>;
|
||||
opp-hz = /bits/ 64 <760000000>;
|
||||
};
|
||||
|
||||
opp@760000000,875,0,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x01 0x0004>;
|
||||
opp-hz = /bits/ 64 <760000000>;
|
||||
};
|
||||
|
||||
opp@760000000,875,1,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x02 0x0004>;
|
||||
opp-supported-hw = <0x04 0x0001>, <0x02 0x0002>,
|
||||
<0x01 0x0004>, <0x02 0x0004>;
|
||||
opp-hz = /bits/ 64 <760000000>;
|
||||
};
|
||||
|
||||
@@ -199,19 +165,8 @@
|
||||
|
||||
opp@912000000,950 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x02 0x0006>;
|
||||
opp-hz = /bits/ 64 <912000000>;
|
||||
};
|
||||
|
||||
opp@912000000,950,0,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x01 0x0004>;
|
||||
opp-hz = /bits/ 64 <912000000>;
|
||||
};
|
||||
|
||||
opp@912000000,950,2,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x04 0x0004>;
|
||||
opp-supported-hw = <0x02 0x0006>, <0x01 0x0004>,
|
||||
<0x04 0x0004>;
|
||||
opp-hz = /bits/ 64 <912000000>;
|
||||
};
|
||||
|
||||
@@ -253,13 +208,7 @@
|
||||
|
||||
opp@1000000000,1000 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x02 0x0006>;
|
||||
opp-hz = /bits/ 64 <1000000000>;
|
||||
};
|
||||
|
||||
opp@1000000000,1000,0,2 {
|
||||
clock-latency-ns = <400000>;
|
||||
opp-supported-hw = <0x01 0x0004>;
|
||||
opp-supported-hw = <0x02 0x0006>, <0x01 0x0004>;
|
||||
opp-hz = /bits/ 64 <1000000000>;
|
||||
};
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -7,8 +7,13 @@
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/arch_topology.h>
|
||||
|
||||
/* big.LITTLE switcher is incompatible with frequency invariance */
|
||||
#ifndef CONFIG_BL_SWITCHER
|
||||
/* Replace task scheduler's default frequency-invariant accounting */
|
||||
#define arch_set_freq_scale topology_set_freq_scale
|
||||
#define arch_scale_freq_capacity topology_get_freq_scale
|
||||
#define arch_scale_freq_invariant topology_scale_freq_invariant
|
||||
#endif
|
||||
|
||||
/* Replace task scheduler's default cpu-invariant accounting */
|
||||
#define arch_scale_cpu_capacity topology_get_cpu_scale
|
||||
|
||||
@@ -26,7 +26,9 @@ void topology_scale_freq_tick(void);
|
||||
#endif /* CONFIG_ARM64_AMU_EXTN */
|
||||
|
||||
/* Replace task scheduler's default frequency-invariant accounting */
|
||||
#define arch_set_freq_scale topology_set_freq_scale
|
||||
#define arch_scale_freq_capacity topology_get_freq_scale
|
||||
#define arch_scale_freq_invariant topology_scale_freq_invariant
|
||||
|
||||
/* Replace task scheduler's default cpu-invariant accounting */
|
||||
#define arch_scale_cpu_capacity topology_get_cpu_scale
|
||||
|
||||
@@ -248,6 +248,13 @@ static int __init init_amu_fie(void)
|
||||
static_branch_enable(&amu_fie_key);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the system is not fully invariant after AMU init, disable
|
||||
* partial use of counters for frequency invariance.
|
||||
*/
|
||||
if (!topology_scale_freq_invariant())
|
||||
static_branch_disable(&amu_fie_key);
|
||||
|
||||
free_valid_mask:
|
||||
free_cpumask_var(valid_cpus);
|
||||
|
||||
@@ -255,7 +262,7 @@ free_valid_mask:
|
||||
}
|
||||
late_initcall_sync(init_amu_fie);
|
||||
|
||||
bool arch_freq_counters_available(struct cpumask *cpus)
|
||||
bool arch_freq_counters_available(const struct cpumask *cpus)
|
||||
{
|
||||
return amu_freq_invariant() &&
|
||||
cpumask_subset(cpus, amu_fie_cpus);
|
||||
|
||||
@@ -798,22 +798,34 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
|
||||
memset(&cx, 0, sizeof(cx));
|
||||
|
||||
element = &cst->package.elements[i];
|
||||
if (element->type != ACPI_TYPE_PACKAGE)
|
||||
if (element->type != ACPI_TYPE_PACKAGE) {
|
||||
acpi_handle_info(handle, "_CST C%d type(%x) is not package, skip...\n",
|
||||
i, element->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (element->package.count != 4)
|
||||
if (element->package.count != 4) {
|
||||
acpi_handle_info(handle, "_CST C%d package count(%d) is not 4, skip...\n",
|
||||
i, element->package.count);
|
||||
continue;
|
||||
}
|
||||
|
||||
obj = &element->package.elements[0];
|
||||
|
||||
if (obj->type != ACPI_TYPE_BUFFER)
|
||||
if (obj->type != ACPI_TYPE_BUFFER) {
|
||||
acpi_handle_info(handle, "_CST C%d package element[0] type(%x) is not buffer, skip...\n",
|
||||
i, obj->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
reg = (struct acpi_power_register *)obj->buffer.pointer;
|
||||
|
||||
obj = &element->package.elements[1];
|
||||
if (obj->type != ACPI_TYPE_INTEGER)
|
||||
if (obj->type != ACPI_TYPE_INTEGER) {
|
||||
acpi_handle_info(handle, "_CST C[%d] package element[1] type(%x) is not integer, skip...\n",
|
||||
i, obj->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
cx.type = obj->integer.value;
|
||||
/*
|
||||
@@ -850,6 +862,8 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
|
||||
cx.entry_method = ACPI_CSTATE_HALT;
|
||||
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
|
||||
} else {
|
||||
acpi_handle_info(handle, "_CST C%d declares FIXED_HARDWARE C-state but not supported in hardware, skip...\n",
|
||||
i);
|
||||
continue;
|
||||
}
|
||||
} else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
|
||||
@@ -857,6 +871,8 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
|
||||
snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
|
||||
cx.address);
|
||||
} else {
|
||||
acpi_handle_info(handle, "_CST C%d space_id(%x) neither FIXED_HARDWARE nor SYSTEM_IO, skip...\n",
|
||||
i, reg->space_id);
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -864,14 +880,20 @@ int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
|
||||
cx.valid = 1;
|
||||
|
||||
obj = &element->package.elements[2];
|
||||
if (obj->type != ACPI_TYPE_INTEGER)
|
||||
if (obj->type != ACPI_TYPE_INTEGER) {
|
||||
acpi_handle_info(handle, "_CST C%d package element[2] type(%x) not integer, skip...\n",
|
||||
i, obj->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
cx.latency = obj->integer.value;
|
||||
|
||||
obj = &element->package.elements[3];
|
||||
if (obj->type != ACPI_TYPE_INTEGER)
|
||||
if (obj->type != ACPI_TYPE_INTEGER) {
|
||||
acpi_handle_info(handle, "_CST C%d package element[3] type(%x) not integer, skip...\n",
|
||||
i, obj->type);
|
||||
continue;
|
||||
}
|
||||
|
||||
memcpy(&info->states[++last_index], &cx, sizeof(cx));
|
||||
}
|
||||
|
||||
@@ -2011,20 +2011,16 @@ bool acpi_ec_dispatch_gpe(void)
|
||||
if (acpi_any_gpe_status_set(first_ec->gpe))
|
||||
return true;
|
||||
|
||||
if (ec_no_wakeup)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Dispatch the EC GPE in-band, but do not report wakeup in any case
|
||||
* to allow the caller to process events properly after that.
|
||||
*/
|
||||
ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
|
||||
if (ret == ACPI_INTERRUPT_HANDLED) {
|
||||
if (ret == ACPI_INTERRUPT_HANDLED)
|
||||
pm_pr_dbg("ACPI EC GPE dispatched\n");
|
||||
|
||||
/* Flush the event and query workqueues. */
|
||||
acpi_ec_flush_work();
|
||||
}
|
||||
/* Flush the event and query workqueues. */
|
||||
acpi_ec_flush_work();
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -21,18 +21,27 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
__weak bool arch_freq_counters_available(struct cpumask *cpus)
|
||||
bool topology_scale_freq_invariant(void)
|
||||
{
|
||||
return cpufreq_supports_freq_invariance() ||
|
||||
arch_freq_counters_available(cpu_online_mask);
|
||||
}
|
||||
|
||||
__weak bool arch_freq_counters_available(const struct cpumask *cpus)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
|
||||
|
||||
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
|
||||
unsigned long max_freq)
|
||||
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
|
||||
unsigned long max_freq)
|
||||
{
|
||||
unsigned long scale;
|
||||
int i;
|
||||
|
||||
if (WARN_ON_ONCE(!cur_freq || !max_freq))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If the use of counters for FIE is enabled, just return as we don't
|
||||
* want to update the scale factor with information from CPUFREQ.
|
||||
|
||||
@@ -123,7 +123,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
|
||||
#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
|
||||
#define genpd_unlock(p) p->lock_ops->unlock(p)
|
||||
|
||||
#define genpd_status_on(genpd) (genpd->status == GPD_STATE_ACTIVE)
|
||||
#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
|
||||
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
|
||||
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
|
||||
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
|
||||
@@ -222,7 +222,7 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
|
||||
* out of off and so update the idle time and vice
|
||||
* versa.
|
||||
*/
|
||||
if (genpd->status == GPD_STATE_ACTIVE) {
|
||||
if (genpd->status == GENPD_STATE_ON) {
|
||||
int state_idx = genpd->state_idx;
|
||||
|
||||
genpd->states[state_idx].idle_time =
|
||||
@@ -497,6 +497,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
|
||||
struct pm_domain_data *pdd;
|
||||
struct gpd_link *link;
|
||||
unsigned int not_suspended = 0;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Do not try to power off the domain in the following situations:
|
||||
@@ -544,26 +545,15 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
|
||||
if (!genpd->gov)
|
||||
genpd->state_idx = 0;
|
||||
|
||||
if (genpd->power_off) {
|
||||
int ret;
|
||||
/* Don't power off, if a child domain is waiting to power on. */
|
||||
if (atomic_read(&genpd->sd_count) > 0)
|
||||
return -EBUSY;
|
||||
|
||||
if (atomic_read(&genpd->sd_count) > 0)
|
||||
return -EBUSY;
|
||||
ret = _genpd_power_off(genpd, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* If sd_count > 0 at this point, one of the subdomains hasn't
|
||||
* managed to call genpd_power_on() for the parent yet after
|
||||
* incrementing it. In that case genpd_power_on() will wait
|
||||
* for us to drop the lock, so we can call .power_off() and let
|
||||
* the genpd_power_on() restore power for us (this shouldn't
|
||||
* happen very often).
|
||||
*/
|
||||
ret = _genpd_power_off(genpd, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
genpd->status = GENPD_STATE_OFF;
|
||||
genpd_update_accounting(genpd);
|
||||
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
@@ -616,7 +606,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
genpd->status = GPD_STATE_ACTIVE;
|
||||
genpd->status = GENPD_STATE_ON;
|
||||
genpd_update_accounting(genpd);
|
||||
|
||||
return 0;
|
||||
@@ -961,7 +951,7 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
|
||||
if (_genpd_power_off(genpd, false))
|
||||
return;
|
||||
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
genpd->status = GENPD_STATE_OFF;
|
||||
|
||||
list_for_each_entry(link, &genpd->child_links, child_node) {
|
||||
genpd_sd_counter_dec(link->parent);
|
||||
@@ -1007,8 +997,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
|
||||
}
|
||||
|
||||
_genpd_power_on(genpd, false);
|
||||
|
||||
genpd->status = GPD_STATE_ACTIVE;
|
||||
genpd->status = GENPD_STATE_ON;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1287,7 +1276,7 @@ static int genpd_restore_noirq(struct device *dev)
|
||||
* so make it appear as powered off to genpd_sync_power_on(),
|
||||
* so that it tries to power it on in case it was really off.
|
||||
*/
|
||||
genpd->status = GPD_STATE_POWER_OFF;
|
||||
genpd->status = GENPD_STATE_OFF;
|
||||
|
||||
genpd_sync_power_on(genpd, true, 0);
|
||||
genpd_unlock(genpd);
|
||||
@@ -1777,7 +1766,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
|
||||
genpd->gov = gov;
|
||||
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
|
||||
atomic_set(&genpd->sd_count, 0);
|
||||
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
|
||||
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
|
||||
genpd->device_count = 0;
|
||||
genpd->max_off_time_ns = -1;
|
||||
genpd->max_off_time_changed = true;
|
||||
@@ -2044,8 +2033,9 @@ int of_genpd_add_provider_simple(struct device_node *np,
|
||||
if (genpd->set_performance_state) {
|
||||
ret = dev_pm_opp_of_add_table(&genpd->dev);
|
||||
if (ret) {
|
||||
dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
|
||||
ret);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
|
||||
ret);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
@@ -2054,7 +2044,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
|
||||
* state.
|
||||
*/
|
||||
genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
|
||||
WARN_ON(!genpd->opp_table);
|
||||
WARN_ON(IS_ERR(genpd->opp_table));
|
||||
}
|
||||
|
||||
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
|
||||
@@ -2111,8 +2101,9 @@ int of_genpd_add_provider_onecell(struct device_node *np,
|
||||
if (genpd->set_performance_state) {
|
||||
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
|
||||
if (ret) {
|
||||
dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
|
||||
i, ret);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
|
||||
i, ret);
|
||||
goto error;
|
||||
}
|
||||
|
||||
@@ -2121,7 +2112,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
|
||||
* performance state.
|
||||
*/
|
||||
genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
|
||||
WARN_ON(!genpd->opp_table);
|
||||
WARN_ON(IS_ERR(genpd->opp_table));
|
||||
}
|
||||
|
||||
genpd->provider = &np->fwnode;
|
||||
@@ -2802,8 +2793,8 @@ static int genpd_summary_one(struct seq_file *s,
|
||||
struct generic_pm_domain *genpd)
|
||||
{
|
||||
static const char * const status_lookup[] = {
|
||||
[GPD_STATE_ACTIVE] = "on",
|
||||
[GPD_STATE_POWER_OFF] = "off"
|
||||
[GENPD_STATE_ON] = "on",
|
||||
[GENPD_STATE_OFF] = "off"
|
||||
};
|
||||
struct pm_domain_data *pm_data;
|
||||
const char *kobj_path;
|
||||
@@ -2881,8 +2872,8 @@ static int summary_show(struct seq_file *s, void *data)
|
||||
static int status_show(struct seq_file *s, void *data)
|
||||
{
|
||||
static const char * const status_lookup[] = {
|
||||
[GPD_STATE_ACTIVE] = "on",
|
||||
[GPD_STATE_POWER_OFF] = "off"
|
||||
[GENPD_STATE_ON] = "on",
|
||||
[GENPD_STATE_OFF] = "off"
|
||||
};
|
||||
|
||||
struct generic_pm_domain *genpd = s->private;
|
||||
@@ -2895,7 +2886,7 @@ static int status_show(struct seq_file *s, void *data)
|
||||
if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
|
||||
goto exit;
|
||||
|
||||
if (genpd->status == GPD_STATE_POWER_OFF)
|
||||
if (genpd->status == GENPD_STATE_OFF)
|
||||
seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
|
||||
genpd->state_idx);
|
||||
else
|
||||
@@ -2938,7 +2929,7 @@ static int idle_states_show(struct seq_file *s, void *data)
|
||||
ktime_t delta = 0;
|
||||
s64 msecs;
|
||||
|
||||
if ((genpd->status == GPD_STATE_POWER_OFF) &&
|
||||
if ((genpd->status == GENPD_STATE_OFF) &&
|
||||
(genpd->state_idx == i))
|
||||
delta = ktime_sub(ktime_get(), genpd->accounting_time);
|
||||
|
||||
@@ -2961,7 +2952,7 @@ static int active_time_show(struct seq_file *s, void *data)
|
||||
if (ret)
|
||||
return -ERESTARTSYS;
|
||||
|
||||
if (genpd->status == GPD_STATE_ACTIVE)
|
||||
if (genpd->status == GENPD_STATE_ON)
|
||||
delta = ktime_sub(ktime_get(), genpd->accounting_time);
|
||||
|
||||
seq_printf(s, "%lld ms\n", ktime_to_ms(
|
||||
@@ -2984,7 +2975,7 @@ static int total_idle_time_show(struct seq_file *s, void *data)
|
||||
|
||||
for (i = 0; i < genpd->state_count; i++) {
|
||||
|
||||
if ((genpd->status == GPD_STATE_POWER_OFF) &&
|
||||
if ((genpd->status == GENPD_STATE_OFF) &&
|
||||
(genpd->state_idx == i))
|
||||
delta = ktime_sub(ktime_get(), genpd->accounting_time);
|
||||
|
||||
|
||||
@@ -291,8 +291,7 @@ static int rpm_get_suppliers(struct device *dev)
|
||||
device_links_read_lock_held()) {
|
||||
int retval;
|
||||
|
||||
if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
|
||||
READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
|
||||
if (!(link->flags & DL_FLAG_PM_RUNTIME))
|
||||
continue;
|
||||
|
||||
retval = pm_runtime_get_sync(link->supplier);
|
||||
@@ -312,8 +311,6 @@ static void rpm_put_suppliers(struct device *dev)
|
||||
|
||||
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
|
||||
device_links_read_lock_held()) {
|
||||
if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
|
||||
continue;
|
||||
|
||||
while (refcount_dec_not_one(&link->rpm_active))
|
||||
pm_runtime_put(link->supplier);
|
||||
|
||||
@@ -283,7 +283,7 @@ config ARM_SPEAR_CPUFREQ
|
||||
|
||||
config ARM_STI_CPUFREQ
|
||||
tristate "STi CPUFreq support"
|
||||
depends on SOC_STIH407
|
||||
depends on CPUFREQ_DT && SOC_STIH407
|
||||
help
|
||||
This driver uses the generic OPP framework to match the running
|
||||
platform with a predefined set of suitable values. If not provided
|
||||
|
||||
@@ -484,6 +484,12 @@ remove_opp:
|
||||
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
|
||||
late_initcall(armada37xx_cpufreq_driver_init);
|
||||
|
||||
static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
|
||||
{ .compatible = "marvell,armada-3700-nb-pm" },
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
|
||||
|
||||
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
|
||||
MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
@@ -137,6 +137,7 @@ static const struct of_device_id blacklist[] __initconst = {
|
||||
|
||||
{ .compatible = "st,stih407", },
|
||||
{ .compatible = "st,stih410", },
|
||||
{ .compatible = "st,stih418", },
|
||||
|
||||
{ .compatible = "sigma,tango4", },
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/pm_opp.h>
|
||||
@@ -24,32 +25,41 @@
|
||||
#include "cpufreq-dt.h"
|
||||
|
||||
struct private_data {
|
||||
struct opp_table *opp_table;
|
||||
struct list_head node;
|
||||
|
||||
cpumask_var_t cpus;
|
||||
struct device *cpu_dev;
|
||||
const char *reg_name;
|
||||
struct opp_table *opp_table;
|
||||
struct opp_table *reg_opp_table;
|
||||
bool have_static_opps;
|
||||
};
|
||||
|
||||
static LIST_HEAD(priv_list);
|
||||
|
||||
static struct freq_attr *cpufreq_dt_attr[] = {
|
||||
&cpufreq_freq_attr_scaling_available_freqs,
|
||||
NULL, /* Extra space for boost-attr if required */
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct private_data *cpufreq_dt_find_data(int cpu)
|
||||
{
|
||||
struct private_data *priv;
|
||||
|
||||
list_for_each_entry(priv, &priv_list, node) {
|
||||
if (cpumask_test_cpu(cpu, priv->cpus))
|
||||
return priv;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int set_target(struct cpufreq_policy *policy, unsigned int index)
|
||||
{
|
||||
struct private_data *priv = policy->driver_data;
|
||||
unsigned long freq = policy->freq_table[index].frequency;
|
||||
int ret;
|
||||
|
||||
ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
|
||||
|
||||
if (!ret) {
|
||||
arch_set_freq_scale(policy->related_cpus, freq,
|
||||
policy->cpuinfo.max_freq);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -90,83 +100,24 @@ node_put:
|
||||
return name;
|
||||
}
|
||||
|
||||
static int resources_available(void)
|
||||
{
|
||||
struct device *cpu_dev;
|
||||
struct regulator *cpu_reg;
|
||||
struct clk *cpu_clk;
|
||||
int ret = 0;
|
||||
const char *name;
|
||||
|
||||
cpu_dev = get_cpu_device(0);
|
||||
if (!cpu_dev) {
|
||||
pr_err("failed to get cpu0 device\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
cpu_clk = clk_get(cpu_dev, NULL);
|
||||
ret = PTR_ERR_OR_ZERO(cpu_clk);
|
||||
if (ret) {
|
||||
/*
|
||||
* If cpu's clk node is present, but clock is not yet
|
||||
* registered, we should try defering probe.
|
||||
*/
|
||||
if (ret == -EPROBE_DEFER)
|
||||
dev_dbg(cpu_dev, "clock not ready, retry\n");
|
||||
else
|
||||
dev_err(cpu_dev, "failed to get clock: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
clk_put(cpu_clk);
|
||||
|
||||
ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
name = find_supply_name(cpu_dev);
|
||||
/* Platform doesn't require regulator */
|
||||
if (!name)
|
||||
return 0;
|
||||
|
||||
cpu_reg = regulator_get_optional(cpu_dev, name);
|
||||
ret = PTR_ERR_OR_ZERO(cpu_reg);
|
||||
if (ret) {
|
||||
/*
|
||||
* If cpu's regulator supply node is present, but regulator is
|
||||
* not yet registered, we should try defering probe.
|
||||
*/
|
||||
if (ret == -EPROBE_DEFER)
|
||||
dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
|
||||
else
|
||||
dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
regulator_put(cpu_reg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpufreq_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpufreq_frequency_table *freq_table;
|
||||
struct opp_table *opp_table = NULL;
|
||||
struct private_data *priv;
|
||||
struct device *cpu_dev;
|
||||
struct clk *cpu_clk;
|
||||
unsigned int transition_latency;
|
||||
bool fallback = false;
|
||||
const char *name;
|
||||
int ret;
|
||||
|
||||
cpu_dev = get_cpu_device(policy->cpu);
|
||||
if (!cpu_dev) {
|
||||
pr_err("failed to get cpu%d device\n", policy->cpu);
|
||||
priv = cpufreq_dt_find_data(policy->cpu);
|
||||
if (!priv) {
|
||||
pr_err("failed to find data for cpu%d\n", policy->cpu);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
cpu_dev = priv->cpu_dev;
|
||||
cpumask_copy(policy->cpus, priv->cpus);
|
||||
|
||||
cpu_clk = clk_get(cpu_dev, NULL);
|
||||
if (IS_ERR(cpu_clk)) {
|
||||
ret = PTR_ERR(cpu_clk);
|
||||
@@ -174,45 +125,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Get OPP-sharing information from "operating-points-v2" bindings */
|
||||
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
|
||||
if (ret) {
|
||||
if (ret != -ENOENT)
|
||||
goto out_put_clk;
|
||||
|
||||
/*
|
||||
* operating-points-v2 not supported, fallback to old method of
|
||||
* finding shared-OPPs for backward compatibility if the
|
||||
* platform hasn't set sharing CPUs.
|
||||
*/
|
||||
if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
|
||||
fallback = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* OPP layer will be taking care of regulators now, but it needs to know
|
||||
* the name of the regulator first.
|
||||
*/
|
||||
name = find_supply_name(cpu_dev);
|
||||
if (name) {
|
||||
opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
|
||||
if (IS_ERR(opp_table)) {
|
||||
ret = PTR_ERR(opp_table);
|
||||
dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
|
||||
policy->cpu, ret);
|
||||
goto out_put_clk;
|
||||
}
|
||||
}
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv) {
|
||||
ret = -ENOMEM;
|
||||
goto out_put_regulator;
|
||||
}
|
||||
|
||||
priv->reg_name = name;
|
||||
priv->opp_table = opp_table;
|
||||
|
||||
/*
|
||||
* Initialize OPP tables for all policy->cpus. They will be shared by
|
||||
* all CPUs which have marked their CPUs shared with OPP bindings.
|
||||
@@ -232,31 +144,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
|
||||
*/
|
||||
ret = dev_pm_opp_get_opp_count(cpu_dev);
|
||||
if (ret <= 0) {
|
||||
dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
|
||||
ret = -EPROBE_DEFER;
|
||||
dev_err(cpu_dev, "OPP table can't be empty\n");
|
||||
ret = -ENODEV;
|
||||
goto out_free_opp;
|
||||
}
|
||||
|
||||
if (fallback) {
|
||||
cpumask_setall(policy->cpus);
|
||||
|
||||
/*
|
||||
* OPP tables are initialized only for policy->cpu, do it for
|
||||
* others as well.
|
||||
*/
|
||||
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
|
||||
if (ret)
|
||||
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
|
||||
__func__, ret);
|
||||
}
|
||||
|
||||
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
|
||||
if (ret) {
|
||||
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
|
||||
goto out_free_opp;
|
||||
}
|
||||
|
||||
priv->cpu_dev = cpu_dev;
|
||||
policy->driver_data = priv;
|
||||
policy->clk = cpu_clk;
|
||||
policy->freq_table = freq_table;
|
||||
@@ -288,11 +186,6 @@ out_free_cpufreq_table:
|
||||
out_free_opp:
|
||||
if (priv->have_static_opps)
|
||||
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
|
||||
kfree(priv);
|
||||
out_put_regulator:
|
||||
if (name)
|
||||
dev_pm_opp_put_regulators(opp_table);
|
||||
out_put_clk:
|
||||
clk_put(cpu_clk);
|
||||
|
||||
return ret;
|
||||
@@ -320,12 +213,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
|
||||
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
|
||||
if (priv->have_static_opps)
|
||||
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
|
||||
if (priv->reg_name)
|
||||
dev_pm_opp_put_regulators(priv->opp_table);
|
||||
|
||||
clk_put(policy->clk);
|
||||
kfree(priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -344,21 +232,119 @@ static struct cpufreq_driver dt_cpufreq_driver = {
|
||||
.suspend = cpufreq_generic_suspend,
|
||||
};
|
||||
|
||||
static int dt_cpufreq_early_init(struct device *dev, int cpu)
|
||||
{
|
||||
struct private_data *priv;
|
||||
struct device *cpu_dev;
|
||||
const char *reg_name;
|
||||
int ret;
|
||||
|
||||
/* Check if this CPU is already covered by some other policy */
|
||||
if (cpufreq_dt_find_data(cpu))
|
||||
return 0;
|
||||
|
||||
cpu_dev = get_cpu_device(cpu);
|
||||
if (!cpu_dev)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
priv->cpu_dev = cpu_dev;
|
||||
|
||||
/* Try to get OPP table early to ensure resources are available */
|
||||
priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
|
||||
if (IS_ERR(priv->opp_table)) {
|
||||
ret = PTR_ERR(priv->opp_table);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
|
||||
goto free_cpumask;
|
||||
}
|
||||
|
||||
/*
|
||||
* OPP layer will be taking care of regulators now, but it needs to know
|
||||
* the name of the regulator first.
|
||||
*/
|
||||
reg_name = find_supply_name(cpu_dev);
|
||||
if (reg_name) {
|
||||
priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
|
||||
®_name, 1);
|
||||
if (IS_ERR(priv->reg_opp_table)) {
|
||||
ret = PTR_ERR(priv->reg_opp_table);
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(cpu_dev, "failed to set regulators: %d\n",
|
||||
ret);
|
||||
goto put_table;
|
||||
}
|
||||
}
|
||||
|
||||
/* Find OPP sharing information so we can fill pri->cpus here */
|
||||
/* Get OPP-sharing information from "operating-points-v2" bindings */
|
||||
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
|
||||
if (ret) {
|
||||
if (ret != -ENOENT)
|
||||
goto put_reg;
|
||||
|
||||
/*
|
||||
* operating-points-v2 not supported, fallback to all CPUs share
|
||||
* OPP for backward compatibility if the platform hasn't set
|
||||
* sharing CPUs.
|
||||
*/
|
||||
if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
|
||||
cpumask_setall(priv->cpus);
|
||||
|
||||
/*
|
||||
* OPP tables are initialized only for cpu, do it for
|
||||
* others as well.
|
||||
*/
|
||||
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
|
||||
if (ret)
|
||||
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
|
||||
__func__, ret);
|
||||
}
|
||||
}
|
||||
|
||||
list_add(&priv->node, &priv_list);
|
||||
return 0;
|
||||
|
||||
put_reg:
|
||||
if (priv->reg_opp_table)
|
||||
dev_pm_opp_put_regulators(priv->reg_opp_table);
|
||||
put_table:
|
||||
dev_pm_opp_put_opp_table(priv->opp_table);
|
||||
free_cpumask:
|
||||
free_cpumask_var(priv->cpus);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dt_cpufreq_release(void)
|
||||
{
|
||||
struct private_data *priv, *tmp;
|
||||
|
||||
list_for_each_entry_safe(priv, tmp, &priv_list, node) {
|
||||
if (priv->reg_opp_table)
|
||||
dev_pm_opp_put_regulators(priv->reg_opp_table);
|
||||
dev_pm_opp_put_opp_table(priv->opp_table);
|
||||
free_cpumask_var(priv->cpus);
|
||||
list_del(&priv->node);
|
||||
}
|
||||
}
|
||||
|
||||
static int dt_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
|
||||
int ret;
|
||||
int ret, cpu;
|
||||
|
||||
/*
|
||||
* All per-cluster (CPUs sharing clock/voltages) initialization is done
|
||||
* from ->init(). In probe(), we just need to make sure that clk and
|
||||
* regulators are available. Else defer probe and retry.
|
||||
*
|
||||
* FIXME: Is checking this only for CPU0 sufficient ?
|
||||
*/
|
||||
ret = resources_available();
|
||||
if (ret)
|
||||
return ret;
|
||||
/* Request resources early so we can return in case of -EPROBE_DEFER */
|
||||
for_each_possible_cpu(cpu) {
|
||||
ret = dt_cpufreq_early_init(&pdev->dev, cpu);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (data) {
|
||||
if (data->have_governor_per_policy)
|
||||
@@ -374,15 +360,21 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
ret = cpufreq_register_driver(&dt_cpufreq_driver);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed register driver: %d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
err:
|
||||
dt_cpufreq_release();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dt_cpufreq_remove(struct platform_device *pdev)
|
||||
{
|
||||
cpufreq_unregister_driver(&dt_cpufreq_driver);
|
||||
dt_cpufreq_release();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user