Merge tag 'pm-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management updates for 3.4 from Rafael Wysocki:
 "Assorted extensions and fixes including:

  * Introduction of early/late suspend/hibernation device callbacks.
  * Generic PM domains extensions and fixes.
  * devfreq updates from Axel Lin and MyungJoo Ham.
  * Device PM QoS updates.
  * Fixes of concurrency problems with wakeup sources.
  * System suspend and hibernation fixes."

* tag 'pm-for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (43 commits)
  PM / Domains: Check domain status during hibernation restore of devices
  PM / devfreq: add relation of recommended frequency.
  PM / shmobile: Make MTU2 driver use pm_genpd_dev_always_on()
  PM / shmobile: Make CMT driver use pm_genpd_dev_always_on()
  PM / shmobile: Make TMU driver use pm_genpd_dev_always_on()
  PM / Domains: Introduce "always on" device flag
  PM / Domains: Fix hibernation restore of devices, v2
  PM / Domains: Fix handling of wakeup devices during system resume
  sh_mmcif / PM: Use PM QoS latency constraint
  tmio_mmc / PM: Use PM QoS latency constraint
  PM / QoS: Make it possible to expose PM QoS latency constraints
  PM / Sleep: JBD and JBD2 missing set_freezable()
  PM / Domains: Fix include for PM_GENERIC_DOMAINS=n case
  PM / Freezer: Remove references to TIF_FREEZE in comments
  PM / Sleep: Add more wakeup source initialization routines
  PM / Hibernate: Enable usermodehelpers in hibernate() error path
  PM / Sleep: Make __pm_stay_awake() delete wakeup source timers
  PM / Sleep: Fix race conditions related to wakeup source timer function
  PM / Sleep: Fix possible infinite loop during wakeup source destruction
  PM / Hibernate: print physical addresses consistently with other parts of kernel
  ...
Committed by Linus Torvalds on 2012-03-21 10:15:51 -07:00
53 changed files with 1462 additions and 621 deletions
@@ -165,3 +165,21 @@ Description:
Not all drivers support this attribute. If it isn't supported,
attempts to read or write it will yield I/O errors.
What: /sys/devices/.../power/pm_qos_resume_latency_us
Date: March 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Description:
The /sys/devices/.../power/pm_qos_resume_latency_us attribute
contains the PM QoS resume latency limit for the given device,
which is the maximum allowed time it can take to resume the
device, after it has been suspended at run time, from a resume
request to the moment the device will be ready to process I/O,
in microseconds. If it is equal to 0, however, this means that
the PM QoS resume latency may be arbitrary.
Not all drivers support this attribute. If it isn't supported,
it is not present.
This attribute has no effect on system-wide suspend/resume and
hibernation.
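
For illustration, a minimal user-space sketch that sets a 100 us resume
latency limit through this attribute (the device path below is a placeholder;
real devices appear under /sys/devices/... according to their position in the
hierarchy):

	/* Sketch: write a PM QoS resume latency limit of 100 us. */
	#include <stdio.h>

	int main(void)
	{
		const char *attr = "/sys/devices/platform/example.0/power"
				   "/pm_qos_resume_latency_us"; /* hypothetical path */
		FILE *f = fopen(attr, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "%d\n", 100);	/* microseconds; 0 means no specific limit */
		fclose(f);
		return 0;
	}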
@@ -0,0 +1,21 @@
* Samsung Exynos Power Domains
Exynos processors include support for multiple power domains which are used
to gate power to one or more peripherals on the processor.
Required Properties:
- compatible: should be one of the following.
* samsung,exynos4210-pd - for exynos4210 type power domain.
- reg: physical base address of the controller and length of memory mapped
region.
Optional Properties:
- samsung,exynos4210-pd-off: Specifies that the power domain is in a turned-off
state during boot and remains turned off until explicitly turned on.
Example:
lcd0: power-domain-lcd0 {
compatible = "samsung,exynos4210-pd";
reg = <0x10023C00 0x10>;
};
@@ -96,6 +96,12 @@ struct dev_pm_ops {
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
int (*suspend_late)(struct device *dev);
int (*resume_early)(struct device *dev);
int (*freeze_late)(struct device *dev);
int (*thaw_early)(struct device *dev);
int (*poweroff_late)(struct device *dev);
int (*restore_early)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
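
As a hedged illustration of how a driver might adopt the new callbacks (the
foo_* names are hypothetical, not part of this patch set): foo_suspend()
quiesces I/O, foo_suspend_late() saves device state after runtime PM has been
disabled, and the resume-side callbacks undo those steps in reverse order.

	#include <linux/device.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)      { /* quiesce I/O */ return 0; }
	static int foo_suspend_late(struct device *dev) { /* save registers */ return 0; }
	static int foo_resume_early(struct device *dev) { /* restore registers */ return 0; }
	static int foo_resume(struct device *dev)       { /* restart I/O */ return 0; }

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend	= foo_suspend,
		.suspend_late	= foo_suspend_late,
		.resume_early	= foo_resume_early,
		.resume		= foo_resume,
	};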
@@ -305,7 +311,7 @@ Entering System Suspend
-----------------------
When the system goes into the standby or memory sleep state, the phases are:
prepare, suspend, suspend_noirq.
prepare, suspend, suspend_late, suspend_noirq.
1. The prepare phase is meant to prevent races by preventing new devices
from being registered; the PM core would never know that all the
@@ -324,7 +330,12 @@ When the system goes into the standby or memory sleep state, the phases are:
appropriate low-power state, depending on the bus type the device is on,
and they may enable wakeup events.
3. The suspend_noirq phase occurs after IRQ handlers have been disabled,
3. For a number of devices it is convenient to split suspend into the
"quiesce device" and "save device state" phases, in which cases
suspend_late is meant to do the latter. It is always executed after
runtime power management has been disabled for all devices.
4. The suspend_noirq phase occurs after IRQ handlers have been disabled,
which means that the driver's interrupt handler will not be called while
the callback method is running. The methods should save the values of
the device's registers that weren't saved previously and finally put the
@@ -359,7 +370,7 @@ Leaving System Suspend
----------------------
When resuming from standby or memory sleep, the phases are:
resume_noirq, resume, complete.
resume_noirq, resume_early, resume, complete.
1. The resume_noirq callback methods should perform any actions needed
before the driver's interrupt handlers are invoked. This generally
@@ -375,14 +386,18 @@ When resuming from standby or memory sleep, the phases are:
device driver's ->pm.resume_noirq() method to perform device-specific
actions.
2. The resume methods should bring the device back to its operating
2. The resume_early methods should prepare devices for the execution of
the resume methods. This generally involves undoing the actions of the
preceding suspend_late phase.
3. The resume methods should bring the device back to its operating
state, so that it can perform normal I/O. This generally involves
undoing the actions of the suspend phase.
3. The complete phase uses only a bus callback. The method should undo the
actions of the prepare phase. Note, however, that new children may be
registered below the device as soon as the resume callbacks occur; it's
not necessary to wait until the complete phase.
4. The complete phase should undo the actions of the prepare phase. Note,
however, that new children may be registered below the device as soon as
the resume callbacks occur; it's not necessary to wait until the
complete phase.
At the end of these phases, drivers should be as functional as they were before
suspending: I/O can be performed using DMA and IRQs, and the relevant clocks are
@@ -429,8 +444,8 @@ an image of the system memory while everything is stable, reactivate all
devices (thaw), write the image to permanent storage, and finally shut down the
system (poweroff). The phases used to accomplish this are:
prepare, freeze, freeze_noirq, thaw_noirq, thaw, complete,
prepare, poweroff, poweroff_noirq
prepare, freeze, freeze_late, freeze_noirq, thaw_noirq, thaw_early,
thaw, complete, prepare, poweroff, poweroff_late, poweroff_noirq
1. The prepare phase is discussed in the "Entering System Suspend" section
above.
@@ -441,7 +456,11 @@ system (poweroff). The phases used to accomplish this are:
save time it's best not to do so. Also, the device should not be
prepared to generate wakeup events.
3. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
3. The freeze_late phase is analogous to the suspend_late phase described
above, except that the device should not be put in a low-power state and
should not be allowed to generate wakeup events.
4. The freeze_noirq phase is analogous to the suspend_noirq phase discussed
above, except again that the device should not be put in a low-power
state and should not be allowed to generate wakeup events.
@@ -449,15 +468,19 @@ At this point the system image is created. All devices should be inactive and
the contents of memory should remain undisturbed while this happens, so that the
image forms an atomic snapshot of the system state.
4. The thaw_noirq phase is analogous to the resume_noirq phase discussed
5. The thaw_noirq phase is analogous to the resume_noirq phase discussed
above. The main difference is that its methods can assume the device is
in the same state as at the end of the freeze_noirq phase.
5. The thaw phase is analogous to the resume phase discussed above. Its
6. The thaw_early phase is analogous to the resume_early phase described
above. Its methods should undo the actions of the preceding
freeze_late phase, if necessary.
7. The thaw phase is analogous to the resume phase discussed above. Its
methods should bring the device back to an operating state, so that it
can be used for saving the image if necessary.
6. The complete phase is discussed in the "Leaving System Suspend" section
8. The complete phase is discussed in the "Leaving System Suspend" section
above.
At this point the system image is saved, and the devices then need to be
@@ -465,16 +488,19 @@ prepared for the upcoming system shutdown. This is much like suspending them
before putting the system into the standby or memory sleep state, and the phases
are similar.
7. The prepare phase is discussed above.
9. The prepare phase is discussed above.
8. The poweroff phase is analogous to the suspend phase.
10. The poweroff phase is analogous to the suspend phase.
9. The poweroff_noirq phase is analogous to the suspend_noirq phase.
11. The poweroff_late phase is analogous to the suspend_late phase.
The poweroff and poweroff_noirq callbacks should do essentially the same things
as the suspend and suspend_noirq callbacks. The only notable difference is that
they need not store the device register values, because the registers should
already have been stored during the freeze or freeze_noirq phases.
12. The poweroff_noirq phase is analogous to the suspend_noirq phase.
The poweroff, poweroff_late and poweroff_noirq callbacks should do essentially
the same things as the suspend, suspend_late and suspend_noirq callbacks,
respectively. The only notable difference is that they need not store the
device register values, because the registers should already have been stored
during the freeze, freeze_late or freeze_noirq phases.
Leaving Hibernation
@@ -518,22 +544,25 @@ To achieve this, the image kernel must restore the devices' pre-hibernation
functionality. The operation is much like waking up from the memory sleep
state, although it involves different phases:
restore_noirq, restore, complete
restore_noirq, restore_early, restore, complete
1. The restore_noirq phase is analogous to the resume_noirq phase.
2. The restore phase is analogous to the resume phase.
2. The restore_early phase is analogous to the resume_early phase.
3. The complete phase is discussed above.
3. The restore phase is analogous to the resume phase.
The main difference from resume[_noirq] is that restore[_noirq] must assume the
device has been accessed and reconfigured by the boot loader or the boot kernel.
Consequently the state of the device may be different from the state remembered
from the freeze and freeze_noirq phases. The device may even need to be reset
and completely re-initialized. In many cases this difference doesn't matter, so
the resume[_noirq] and restore[_noirq] method pointers can be set to the same
routines. Nevertheless, different callback pointers are used in case there is a
situation where it actually matters.
4. The complete phase is discussed above.
The main difference from resume[_early|_noirq] is that restore[_early|_noirq]
must assume the device has been accessed and reconfigured by the boot loader or
the boot kernel. Consequently the state of the device may be different from the
state remembered from the freeze, freeze_late and freeze_noirq phases. The
device may even need to be reset and completely re-initialized. In many cases
this difference doesn't matter, so the resume[_early|_noirq] and
restore[_early|_noirq] method pointers can be set to the same routines.
Nevertheless, different callback pointers are used in case there is a situation
where it actually does matter.
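
Since restore may share the resume path, a driver with no special restore
needs can simply point both sets of callbacks at the same routines, as the
generic PM domains code in this pull does; a minimal sketch with hypothetical
foo_* callbacks:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int foo_resume_early(struct device *dev) { return 0; }
	static int foo_resume(struct device *dev)       { return 0; }

	static const struct dev_pm_ops foo_pm_ops = {
		.resume_early	= foo_resume_early,
		.resume		= foo_resume,
		.restore_early	= foo_resume_early,	/* share the resume path */
		.restore	= foo_resume,		/* share the resume path */
	};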
Device Power Management Domains
@@ -63,6 +63,27 @@ devices have been reinitialized, the function thaw_processes() is called in
order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
have been frozen leave __refrigerator() and continue running.
Rationale behind the functions dealing with freezing and thawing of tasks:
-------------------------------------------------------------------------
freeze_processes():
- freezes only userspace tasks
freeze_kernel_threads():
- freezes all tasks (including kernel threads) because we can't freeze
kernel threads without freezing userspace tasks
thaw_kernel_threads():
- thaws only kernel threads; this is particularly useful if we need to do
anything special in between thawing of kernel threads and thawing of
userspace tasks, or if we want to postpone the thawing of userspace tasks
thaw_processes():
- thaws all tasks (including kernel threads) because we can't thaw userspace
tasks without thawing kernel threads
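
The JBD/JBD2 set_freezable() fix in the pull above follows the standard
freezable-kthread pattern; a minimal sketch with hypothetical names:

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sched.h>

	/* Without set_freezable() the thread keeps PF_NOFREEZE and the
	 * freezer would skip it.
	 */
	static int foo_thread(void *data)
	{
		set_freezable();

		while (!kthread_should_stop()) {
			try_to_freeze();	/* park here while the system freezes */

			/* ... do one unit of work ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}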
III. Which kernel threads are freezable?
Kernel threads are not freezable by default. However, a kernel thread may clear
@@ -34,6 +34,7 @@ config CPU_EXYNOS4210
select ARM_CPU_SUSPEND if PM
select S5P_PM if PM
select S5P_SLEEP if PM
select PM_GENERIC_DOMAINS
help
Enable EXYNOS4210 CPU support
@@ -74,11 +75,6 @@ config EXYNOS4_SETUP_FIMD0
help
Common setup code for FIMD0.
config EXYNOS4_DEV_PD
bool
help
Compile in platform device definitions for Power Domain
config EXYNOS4_DEV_SYSMMU
bool
help
@@ -195,7 +191,6 @@ config MACH_SMDKV310
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_DEV_SYSMMU
@@ -243,7 +238,6 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_ONENAND
select S5P_DEV_TV
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_I2C3
@@ -277,7 +271,6 @@ config MACH_NURI
select S5P_DEV_USB_EHCI
select S5P_SETUP_MIPIPHY
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select EXYNOS4_SETUP_FIMC
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
@@ -310,7 +303,6 @@ config MACH_ORIGEN
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_PD
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_SDHCI
@@ -17,6 +17,7 @@ obj-$(CONFIG_CPU_EXYNOS4210) += clock-exynos4210.o
obj-$(CONFIG_SOC_EXYNOS4212) += clock-exynos4212.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ARCH_EXYNOS4) += pmu.o
@@ -45,7 +46,6 @@ obj-$(CONFIG_MACH_EXYNOS4_DT) += mach-exynos4-dt.o
obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
obj-$(CONFIG_EXYNOS4_DEV_PD) += dev-pd.o
obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o
obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o
@@ -1,139 +0,0 @@
/* linux/arch/arm/mach-exynos4/dev-pd.c
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4 - Power Domain support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/regs-pmu.h>
#include <plat/pd.h>
static int exynos4_pd_enable(struct device *dev)
{
struct samsung_pd_info *pdata = dev->platform_data;
u32 timeout;
__raw_writel(S5P_INT_LOCAL_PWR_EN, pdata->base);
/* Wait max 1ms */
timeout = 10;
while ((__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN)
!= S5P_INT_LOCAL_PWR_EN) {
if (timeout == 0) {
printk(KERN_ERR "Power domain %s enable failed.\n",
dev_name(dev));
return -ETIMEDOUT;
}
timeout--;
udelay(100);
}
return 0;
}
static int exynos4_pd_disable(struct device *dev)
{
struct samsung_pd_info *pdata = dev->platform_data;
u32 timeout;
__raw_writel(0, pdata->base);
/* Wait max 1ms */
timeout = 10;
while (__raw_readl(pdata->base + 0x4) & S5P_INT_LOCAL_PWR_EN) {
if (timeout == 0) {
printk(KERN_ERR "Power domain %s disable failed.\n",
dev_name(dev));
return -ETIMEDOUT;
}
timeout--;
udelay(100);
}
return 0;
}
struct platform_device exynos4_device_pd[] = {
{
.name = "samsung-pd",
.id = 0,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_MFC_CONF,
},
},
}, {
.name = "samsung-pd",
.id = 1,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_G3D_CONF,
},
},
}, {
.name = "samsung-pd",
.id = 2,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_LCD0_CONF,
},
},
}, {
.name = "samsung-pd",
.id = 3,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_LCD1_CONF,
},
},
}, {
.name = "samsung-pd",
.id = 4,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_TV_CONF,
},
},
}, {
.name = "samsung-pd",
.id = 5,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_CAM_CONF,
},
},
}, {
.name = "samsung-pd",
.id = 6,
.dev = {
.platform_data = &(struct samsung_pd_info) {
.enable = exynos4_pd_enable,
.disable = exynos4_pd_disable,
.base = S5P_PMU_GPS_CONF,
},
},
},
};
@@ -1263,9 +1263,6 @@ static struct platform_device *nuri_devices[] __initdata = {
&s5p_device_mfc,
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_pd[PD_MFC],
&exynos4_device_pd[PD_LCD0],
&exynos4_device_pd[PD_CAM],
&s5p_device_fimc_md,
/* NURI Devices */
@@ -1315,14 +1312,6 @@ static void __init nuri_machine_init(void)
/* Last */
platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));
s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
s5p_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_mipi_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
}
MACHINE_START(NURI, "NURI")
@@ -621,13 +621,6 @@ static struct platform_device *origen_devices[] __initdata = {
&s5p_device_mfc_r,
&s5p_device_mixer,
&exynos4_device_ohci,
&exynos4_device_pd[PD_LCD0],
&exynos4_device_pd[PD_TV],
&exynos4_device_pd[PD_G3D],
&exynos4_device_pd[PD_LCD1],
&exynos4_device_pd[PD_CAM],
&exynos4_device_pd[PD_GPS],
&exynos4_device_pd[PD_MFC],
&origen_device_gpiokeys,
&origen_lcd_hv070wsa,
};
@@ -695,13 +688,6 @@ static void __init origen_machine_init(void)
platform_add_devices(origen_devices, ARRAY_SIZE(origen_devices));
s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
samsung_bl_set(&origen_bl_gpio_info, &origen_bl_data);
}
@@ -277,13 +277,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_mfc,
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_pd[PD_MFC],
&exynos4_device_pd[PD_G3D],
&exynos4_device_pd[PD_LCD0],
&exynos4_device_pd[PD_LCD1],
&exynos4_device_pd[PD_CAM],
&exynos4_device_pd[PD_TV],
&exynos4_device_pd[PD_GPS],
&exynos4_device_spdif,
&exynos4_device_sysmmu,
&samsung_asoc_dma,
@@ -336,10 +329,6 @@ static void s5p_tv_setup(void)
WARN_ON(gpio_request_one(EXYNOS4_GPX3(7), GPIOF_IN, "hpd-plug"));
s3c_gpio_cfgpin(EXYNOS4_GPX3(7), S3C_GPIO_SFN(0x3));
s3c_gpio_setpull(EXYNOS4_GPX3(7), S3C_GPIO_PULL_NONE);
/* setup dependencies between TV devices */
s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
}
static void __init smdkv310_map_io(void)
@@ -379,7 +368,6 @@ static void __init smdkv310_machine_init(void)
clk_xusbxti.rate = 24000000;
platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
}
MACHINE_START(SMDKV310, "SMDKV310")
@@ -971,7 +971,6 @@ static struct platform_device *universal_devices[] __initdata = {
&s3c_device_i2c5,
&s5p_device_i2c_hdmiphy,
&hdmi_fixed_voltage,
&exynos4_device_pd[PD_TV],
&s5p_device_hdmi,
&s5p_device_sdo,
&s5p_device_mixer,
@@ -984,9 +983,6 @@ static struct platform_device *universal_devices[] __initdata = {
&s5p_device_mfc,
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_pd[PD_MFC],
&exynos4_device_pd[PD_LCD0],
&exynos4_device_pd[PD_CAM],
&cam_i_core_fixed_reg_dev,
&cam_s_if_fixed_reg_dev,
&s5p_device_fimc_md,
@@ -1005,10 +1001,6 @@ void s5p_tv_setup(void)
gpio_request_one(EXYNOS4_GPX3(7), GPIOF_IN, "hpd-plug");
s3c_gpio_cfgpin(EXYNOS4_GPX3(7), S3C_GPIO_SFN(0x3));
s3c_gpio_setpull(EXYNOS4_GPX3(7), S3C_GPIO_PULL_NONE);
/* setup dependencies between TV devices */
s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
}
static void __init universal_reserve(void)
@@ -1042,15 +1034,6 @@ static void __init universal_machine_init(void)
/* Last */
platform_add_devices(universal_devices, ARRAY_SIZE(universal_devices));
s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
s5p_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_mipi_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
}
MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
@@ -0,0 +1,195 @@
/*
* Exynos Generic power domain support.
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Implementation of Exynos specific power domain control which is used in
* conjunction with runtime-pm. Support for both device-tree and non-device-tree
* based power domain support is included.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_domain.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <mach/regs-pmu.h>
#include <plat/devs.h>
/*
* Exynos specific wrapper around the generic power domain
*/
struct exynos_pm_domain {
void __iomem *base;
char const *name;
bool is_off;
struct generic_pm_domain pd;
};
static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
{
struct exynos_pm_domain *pd;
void __iomem *base;
u32 timeout, pwr;
char *op;
pd = container_of(domain, struct exynos_pm_domain, pd);
base = pd->base;
pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
__raw_writel(pwr, base);
/* Wait max 1ms */
timeout = 10;
while ((__raw_readl(base + 0x4) & S5P_INT_LOCAL_PWR_EN) != pwr) {
if (!timeout) {
op = (power_on) ? "enable" : "disable";
pr_err("Power domain %s %s failed\n", domain->name, op);
return -ETIMEDOUT;
}
timeout--;
cpu_relax();
usleep_range(80, 100);
}
return 0;
}
static int exynos_pd_power_on(struct generic_pm_domain *domain)
{
return exynos_pd_power(domain, true);
}
static int exynos_pd_power_off(struct generic_pm_domain *domain)
{
return exynos_pd_power(domain, false);
}
#define EXYNOS_GPD(PD, BASE, NAME) \
static struct exynos_pm_domain PD = { \
.base = (void __iomem *)BASE, \
.name = NAME, \
.pd = { \
.power_off = exynos_pd_power_off, \
.power_on = exynos_pd_power_on, \
}, \
}
#ifdef CONFIG_OF
static __init int exynos_pm_dt_parse_domains(void)
{
struct device_node *np;
for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
struct exynos_pm_domain *pd;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
pr_err("%s: failed to allocate memory for domain\n",
__func__);
return -ENOMEM;
}
if (of_get_property(np, "samsung,exynos4210-pd-off", NULL))
pd->is_off = true;
pd->name = np->name;
pd->base = of_iomap(np, 0);
pd->pd.power_off = exynos_pd_power_off;
pd->pd.power_on = exynos_pd_power_on;
pd->pd.of_node = np;
pm_genpd_init(&pd->pd, NULL, false);
}
return 0;
}
#else
static __init int exynos_pm_dt_parse_domains(void)
{
return 0;
}
#endif /* CONFIG_OF */
static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
struct exynos_pm_domain *pd)
{
if (pdev->dev.bus) {
if (pm_genpd_add_device(&pd->pd, &pdev->dev))
pr_info("%s: error in adding %s device to %s power"
"domain\n", __func__, dev_name(&pdev->dev),
pd->name);
}
}
EXYNOS_GPD(exynos4_pd_mfc, S5P_PMU_MFC_CONF, "pd-mfc");
EXYNOS_GPD(exynos4_pd_g3d, S5P_PMU_G3D_CONF, "pd-g3d");
EXYNOS_GPD(exynos4_pd_lcd0, S5P_PMU_LCD0_CONF, "pd-lcd0");
EXYNOS_GPD(exynos4_pd_lcd1, S5P_PMU_LCD1_CONF, "pd-lcd1");
EXYNOS_GPD(exynos4_pd_tv, S5P_PMU_TV_CONF, "pd-tv");
EXYNOS_GPD(exynos4_pd_cam, S5P_PMU_CAM_CONF, "pd-cam");
EXYNOS_GPD(exynos4_pd_gps, S5P_PMU_GPS_CONF, "pd-gps");
static struct exynos_pm_domain *exynos4_pm_domains[] = {
&exynos4_pd_mfc,
&exynos4_pd_g3d,
&exynos4_pd_lcd0,
&exynos4_pd_lcd1,
&exynos4_pd_tv,
&exynos4_pd_cam,
&exynos4_pd_gps,
};
static __init int exynos4_pm_init_power_domain(void)
{
int idx;
if (of_have_populated_dt())
return exynos_pm_dt_parse_domains();
for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
exynos4_pm_domains[idx]->is_off);
#ifdef CONFIG_S5P_DEV_FIMD0
exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
#endif
#ifdef CONFIG_S5P_DEV_TV
exynos_pm_add_dev_to_genpd(&s5p_device_hdmi, &exynos4_pd_tv);
exynos_pm_add_dev_to_genpd(&s5p_device_mixer, &exynos4_pd_tv);
#endif
#ifdef CONFIG_S5P_DEV_MFC
exynos_pm_add_dev_to_genpd(&s5p_device_mfc, &exynos4_pd_mfc);
#endif
#ifdef CONFIG_S5P_DEV_FIMC0
exynos_pm_add_dev_to_genpd(&s5p_device_fimc0, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_FIMC1
exynos_pm_add_dev_to_genpd(&s5p_device_fimc1, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_FIMC2
exynos_pm_add_dev_to_genpd(&s5p_device_fimc2, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_FIMC3
exynos_pm_add_dev_to_genpd(&s5p_device_fimc3, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_CSIS0
exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis0, &exynos4_pd_cam);
#endif
#ifdef CONFIG_S5P_DEV_CSIS1
exynos_pm_add_dev_to_genpd(&s5p_device_mipi_csis1, &exynos4_pd_cam);
#endif
return 0;
}
arch_initcall(exynos4_pm_init_power_domain);
static __init int exynos_pm_late_initcall(void)
{
pm_genpd_poweroff_unused();
return 0;
}
late_initcall(exynos_pm_late_initcall);
@@ -1043,6 +1043,8 @@ void __init sh7372_add_standard_devices(void)
sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device);
sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device);
sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device);
sh7372_add_device_to_domain(&sh7372_a4r, &tmu00_device);
sh7372_add_device_to_domain(&sh7372_a4r, &tmu01_device);
}
void __init sh7372_add_early_devices(void)
@@ -1234,8 +1234,7 @@ static int suspend(int vetoable)
struct apm_user *as;
dpm_suspend_start(PMSG_SUSPEND);
dpm_suspend_noirq(PMSG_SUSPEND);
dpm_suspend_end(PMSG_SUSPEND);
local_irq_disable();
syscore_suspend();
@@ -1259,9 +1258,9 @@ static int suspend(int vetoable)
syscore_resume();
local_irq_enable();
dpm_resume_noirq(PMSG_RESUME);
dpm_resume_start(PMSG_RESUME);
dpm_resume_end(PMSG_RESUME);
queue_event(APM_NORMAL_RESUME, NULL);
spin_lock(&user_list_lock);
for (as = user_list; as != NULL; as = as->next) {
@@ -1277,7 +1276,7 @@ static void standby(void)
{
int err;
dpm_suspend_noirq(PMSG_SUSPEND);
dpm_suspend_end(PMSG_SUSPEND);
local_irq_disable();
syscore_suspend();
@@ -1291,7 +1290,7 @@ static void standby(void)
syscore_resume();
local_irq_enable();
dpm_resume_noirq(PMSG_RESUME);
dpm_resume_start(PMSG_RESUME);
}
static apm_event_t get_event(void)
@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
|| pdd->dev->power.irq_safe))
|| pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
not_suspended++;
if (not_suspended > genpd->in_progress)
@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
if (dev_gpd_data(dev)->always_on)
return -EBUSY;
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -764,8 +767,10 @@ static int pm_genpd_prepare(struct device *dev)
genpd_acquire_lock(genpd);
if (genpd->prepared_count++ == 0)
if (genpd->prepared_count++ == 0) {
genpd->suspended_count = 0;
genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
}
genpd_release_lock(genpd);
@@ -820,17 +825,16 @@ static int pm_genpd_suspend(struct device *dev)
}
/**
* pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
* pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
* @dev: Device to suspend.
*
* Carry out a late suspend of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
static int pm_genpd_suspend_noirq(struct device *dev)
static int pm_genpd_suspend_late(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -838,14 +842,28 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
if (genpd->suspend_power_off)
return 0;
return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
}
ret = genpd_suspend_late(genpd, dev);
if (ret)
return ret;
/**
* pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
* @dev: Device to suspend.
*
* Stop the device and remove power from the domain if all devices in it have
* been stopped.
*/
static int pm_genpd_suspend_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
|| (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
genpd_stop_dev(genpd, dev);
@@ -862,13 +880,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
* pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
* pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Carry out an early resume of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
* Restore power to the device's PM domain, if necessary, and start the device.
*/
static int pm_genpd_resume_noirq(struct device *dev)
{
@@ -880,7 +895,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
if (genpd->suspend_power_off)
if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
|| (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
/*
@@ -890,13 +906,34 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
genpd_start_dev(genpd, dev);
return genpd_resume_early(genpd, dev);
return genpd_start_dev(genpd, dev);
}
/**
* pm_genpd_resume - Resume a device belonging to an I/O power domain.
* pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
* @dev: Device to resume.
*
* Carry out an early resume of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
static int pm_genpd_resume_early(struct device *dev)
{
struct generic_pm_domain *genpd;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
}
/**
* pm_genpd_resume - Resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Resume a device under the assumption that its pm_domain field points to the
@@ -917,7 +954,7 @@ static int pm_genpd_resume(struct device *dev)
}
/**
* pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
* pm_genpd_freeze - Freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Freeze a device under the assumption that its pm_domain field points to the
@@ -938,7 +975,29 @@ static int pm_genpd_freeze(struct device *dev)
}
/**
* pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
* pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
static int pm_genpd_freeze_late(struct device *dev)
{
struct generic_pm_domain *genpd;
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
}
/**
* pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -949,7 +1008,6 @@ static int pm_genpd_freeze(struct device *dev)
static int pm_genpd_freeze_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -957,26 +1015,16 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
if (genpd->suspend_power_off)
return 0;
ret = genpd_freeze_late(genpd, dev);
if (ret)
return ret;
genpd_stop_dev(genpd, dev);
return 0;
return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
0 : genpd_stop_dev(genpd, dev);
}
/**
* pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
* pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Carry out an early thaw of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
* Start the device, unless power has been removed from the domain already
* before the system transition.
*/
static int pm_genpd_thaw_noirq(struct device *dev)
{
@@ -988,12 +1036,30 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
if (genpd->suspend_power_off)
return 0;
return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
0 : genpd_start_dev(genpd, dev);
}
genpd_start_dev(genpd, dev);
/**
* pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Carry out an early thaw of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
static int pm_genpd_thaw_early(struct device *dev)
{
struct generic_pm_domain *genpd;
return genpd_thaw_early(genpd, dev);
dev_dbg(dev, "%s()\n", __func__);
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}
/**
@@ -1018,13 +1084,11 @@ static int pm_genpd_thaw(struct device *dev)
}
/**
* pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
* pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
* Carry out an early restore of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
* Make sure the domain will be in the same power state as before the
* hibernation the system is resuming from and start the device if necessary.
*/
static int pm_genpd_restore_noirq(struct device *dev)
{
@@ -1040,23 +1104,35 @@ static int pm_genpd_restore_noirq(struct device *dev)
* Since all of the "noirq" callbacks are executed sequentially, it is
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*
* At this point suspended_count == 0 means we are being run for the
* first time for the given domain in the present cycle.
*/
if (genpd->suspended_count++ == 0) {
/*
* The boot kernel might put the domain into arbitrary state,
* so make it appear as powered off to pm_genpd_poweron(), so
* that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
if (genpd->suspend_power_off) {
/*
* The boot kernel might put the domain into the power on state,
* so make sure it really is powered off.
* If the domain was off before the hibernation, make
* sure it will be off going forward.
*/
if (genpd->power_off)
genpd->power_off(genpd);
return 0;
}
}
if (genpd->suspend_power_off)
return 0;
pm_genpd_poweron(genpd);
genpd->suspended_count--;
genpd_start_dev(genpd, dev);
return genpd_resume_early(genpd, dev);
return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1099,11 +1175,15 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
#define pm_genpd_suspend_late NULL
#define pm_genpd_suspend_noirq NULL
#define pm_genpd_resume_early NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
#define pm_genpd_freeze_late NULL
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_early NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_restore_noirq NULL
@@ -1170,6 +1250,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
return ret;
}
/**
* __pm_genpd_of_add_device - Add a device to an I/O PM domain.
* @genpd_node: Device tree node pointer representing a PM domain to which
* the device is added.
* @dev: Device to be added.
* @td: Set of PM QoS timing parameters to attach to the device.
*/
int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
struct gpd_timing_data *td)
{
struct generic_pm_domain *genpd = NULL, *gpd;
dev_dbg(dev, "%s()\n", __func__);
if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
return -EINVAL;
mutex_lock(&gpd_list_lock);
list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
if (gpd->of_node == genpd_node) {
genpd = gpd;
break;
}
}
mutex_unlock(&gpd_list_lock);
if (!genpd)
return -EINVAL;
return __pm_genpd_add_device(genpd, dev, td);
}
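
A hedged usage sketch; the "samsung,power-domain" phandle property is an
assumption for illustration only, not something defined by this patch set:

	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/pm_domain.h>

	/* Look up the domain node referenced from the device's DT node and
	 * attach the device to that PM domain with default timing data.
	 */
	static int foo_attach_domain(struct platform_device *pdev)
	{
		struct device_node *pd_node;

		pd_node = of_parse_phandle(pdev->dev.of_node,
					   "samsung,power-domain", 0); /* hypothetical */
		if (!pd_node)
			return 0;	/* no domain, nothing to do */

		return __pm_genpd_of_add_device(pd_node, &pdev->dev, NULL);
	}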
/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
@@ -1215,6 +1327,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
return ret;
}
/**
* pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
* @dev: Device to set/unset the flag for.
* @val: The new value of the device's "always on" flag.
*/
void pm_genpd_dev_always_on(struct device *dev, bool val)
{
struct pm_subsys_data *psd;
unsigned long flags;
spin_lock_irqsave(&dev->power.lock, flags);
psd = dev_to_psd(dev);
if (psd && psd->domain_data)
to_gpd_data(psd->domain_data)->always_on = val;
spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
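
A driver whose device must stay powered regardless of domain state would set
the flag once the device has been added to its domain; a minimal sketch with
hypothetical names:

	#include <linux/platform_device.h>
	#include <linux/pm_domain.h>

	/* Mark the device "always on": genpd will then refuse to stop it at
	 * run time and will skip it in the system sleep noirq phases.
	 */
	static int foo_probe(struct platform_device *pdev)
	{
		pm_genpd_dev_always_on(&pdev->dev, true);
		return 0;
	}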
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
@@ -1450,7 +1582,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
return cb ? cb(dev) : pm_generic_suspend_late(dev);
}
/**
@@ -1461,7 +1593,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
return cb ? cb(dev) : pm_generic_resume_noirq(dev);
return cb ? cb(dev) : pm_generic_resume_early(dev);
}
/**
@@ -1494,7 +1626,7 @@ static int pm_genpd_default_freeze_late(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
return cb ? cb(dev) : pm_generic_freeze_late(dev);
}
/**
@@ -1505,7 +1637,7 @@ static int pm_genpd_default_thaw_early(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
return cb ? cb(dev) : pm_generic_thaw_early(dev);
}
/**
@@ -1557,23 +1689,28 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->poweroff_task = NULL;
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
genpd->domain.ops.resume_early = pm_genpd_resume_early;
genpd->domain.ops.resume = pm_genpd_resume;
genpd->domain.ops.freeze = pm_genpd_freeze;
genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
genpd->domain.ops.thaw = pm_genpd_thaw;
genpd->domain.ops.poweroff = pm_genpd_suspend;
genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
genpd->domain.ops.restore_early = pm_genpd_resume_early;
genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
genpd->dev_ops.save_state = pm_genpd_default_save_state;
@@ -91,68 +91,39 @@ int pm_generic_prepare(struct device *dev)
return ret;
}
/**
* __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
* @dev: Device to handle.
* @event: PM transition of the system under way.
* @bool: Whether or not this is the "noirq" stage.
*
* Execute the PM callback corresponding to @event provided by the driver of
* @dev, if defined, and return its error code. Return 0 if the callback is
* not present.
*/
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
if (!pm)
return 0;
switch (event) {
case PM_EVENT_SUSPEND:
callback = noirq ? pm->suspend_noirq : pm->suspend;
break;
case PM_EVENT_FREEZE:
callback = noirq ? pm->freeze_noirq : pm->freeze;
break;
case PM_EVENT_HIBERNATE:
callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
case PM_EVENT_RESUME:
callback = noirq ? pm->resume_noirq : pm->resume;
break;
case PM_EVENT_THAW:
callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
case PM_EVENT_RESTORE:
callback = noirq ? pm->restore_noirq : pm->restore;
break;
default:
callback = NULL;
break;
}
return callback ? callback(dev) : 0;
}
/**
* pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend_noirq(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
/**
* pm_generic_suspend_late - Generic suspend_late callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend_late(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
/**
* pm_generic_suspend - Generic suspend callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
* pm_generic_freeze_late - Generic freeze_late callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze_late(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
/**
* pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff_late(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_THAW, true);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
* pm_generic_thaw_early - Generic thaw_early callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_THAW, false);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_RESUME, true);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
/**
* pm_generic_resume_early - Generic resume_early callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->resume_early ? pm->resume_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_RESUME, false);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
/**
* pm_generic_restore_early - Generic restore_early callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_restore_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->restore_early ? pm->restore_early(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
@@ -245,6 +246,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
return NULL;
}
/**
* pm_late_early_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
pm_message_t state)
{
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
return ops->suspend_late;
case PM_EVENT_RESUME:
return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
return ops->thaw_early;
case PM_EVENT_RESTORE:
return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
}
return NULL;
}
/**
* pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_RESUME(0);
if (dev->pm_domain) {
info = "EARLY power domain ";
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "EARLY type ";
info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "EARLY class ";
info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "EARLY bus ";
info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
info = "EARLY driver ";
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
/**
* dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
* Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
void dpm_resume_noirq(pm_message_t state)
static void dpm_resume_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
int error;
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " noirq", error);
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
}
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int device_resume_early(struct device *dev, pm_message_t state)
{
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "early type ";
callback = pm_late_early_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "early class ";
callback = pm_late_early_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "early bus ";
callback = pm_late_early_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
info = "early driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
error = dpm_run_callback(callback, dev, state, info);
TRACE_RESUME(error);
return error;
}
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static void dpm_resume_early(pm_message_t state)
{
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.next);
int error;
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
error = device_resume_early(dev, state);
if (error) {
suspend_stats.failed_resume_early++;
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
}
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "early");
resume_device_irqs();
}
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
* dpm_resume_start - Execute "noirq" and "early" device callbacks.
* @state: PM transition of the system being carried out.
*/
void dpm_resume_start(pm_message_t state)
{
dpm_resume_noirq(state);
dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
* device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
char *info = NULL;
if (dev->pm_domain) {
info = "LATE power domain ";
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "LATE type ";
info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "LATE class ";
info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "LATE bus ";
info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
info = "LATE driver ";
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
}
/**
* dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
* dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
static int dpm_suspend_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
pm_dev_err(dev, state, " noirq", error);
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
if (error)
dpm_resume_noirq(resume_event(state));
else
dpm_show_time(starttime, state, "late");
dpm_show_time(starttime, state, "noirq");
return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int device_suspend_late(struct device *dev, pm_message_t state)
{
pm_callback_t callback = NULL;
char *info = NULL;
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
info = "late type ";
callback = pm_late_early_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
info = "late class ";
callback = pm_late_early_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
info = "late bus ";
callback = pm_late_early_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
return dpm_run_callback(callback, dev, state, info);
}
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend_late(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_early(resume_event(state));
else
dpm_show_time(starttime, state, "late");
return error;
}
/**
* dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
* @state: PM transition of the system being carried out.
*/
int dpm_suspend_end(pm_message_t state)
{
int error = dpm_suspend_late(state);
return error ? : dpm_suspend_noirq(state);
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
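A sketch of how the suspend core is expected to consume this pair, bracketing the low-level sleep entry; foo_enter_sleep() is an invented stand-in for the core's actual sequencing:

#include <linux/pm.h>

static int foo_enter_sleep(void)
{
	int error;

	error = dpm_suspend_end(PMSG_SUSPEND);
	if (error)
		return error;	/* "late" or "noirq" suspend failed */

	/* ... program the platform and enter the sleep state here ... */

	dpm_resume_start(PMSG_RESUME);	/* "noirq", then "early" resume */
	return 0;
}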
/**
* legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
drivers/base/power/power.h (+4)
@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
extern int pm_qos_sysfs_add(struct device *dev);
extern void pm_qos_sysfs_remove(struct device *dev);
#else /* CONFIG_PM */
@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {}
static inline void rpm_sysfs_remove(struct device *dev) {}
static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
static inline void wakeup_sysfs_remove(struct device *dev) {}
static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
static inline void pm_qos_sysfs_remove(struct device *dev) {}
#endif
drivers/base/power/qos.c (+61)
@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
/*
* If the device's PM QoS resume latency limit has been exposed to user
* space, it has to be hidden at this point.
*/
dev_pm_qos_hide_latency_limit(dev);
mutex_lock(&dev_pm_qos_mtx);
dev->power.power_state = PMSG_INVALID;
@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
return error;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
#ifdef CONFIG_PM_RUNTIME
static void __dev_pm_qos_drop_user_request(struct device *dev)
{
dev_pm_qos_remove_request(dev->power.pq_req);
dev->power.pq_req = 0;
}
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
* @value: Initial value of the latency limit.
*/
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
struct dev_pm_qos_request *req;
int ret;
if (!device_is_registered(dev) || value < 0)
return -EINVAL;
if (dev->power.pq_req)
return -EEXIST;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = dev_pm_qos_add_request(dev, req, value);
if (ret < 0)
return ret;
dev->power.pq_req = req;
ret = pm_qos_sysfs_add(dev);
if (ret)
__dev_pm_qos_drop_user_request(dev);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
/**
* dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
* @dev: Device whose PM QoS latency limit is to be hidden from user space.
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
if (dev->power.pq_req) {
pm_qos_sysfs_remove(dev);
__dev_pm_qos_drop_user_request(dev);
}
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
#endif /* CONFIG_PM_RUNTIME */
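Drivers opt in to the exposed limit explicitly; the sh_mmcif and tmio_mmc patches in this series do so from their probe/remove paths. A hypothetical sketch (foo_* names and the 100 us value invented for illustration):

#include <linux/platform_device.h>
#include <linux/pm_qos.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Expose a 100 us resume latency limit to user space; the
	 * attribute appears as .../power/pm_qos_resume_latency_us. */
	return dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
}

static int foo_remove(struct platform_device *pdev)
{
	dev_pm_qos_hide_latency_limit(&pdev->dev);
	return 0;
}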
drivers/base/power/sysfs.c (+47)
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
static ssize_t pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
}
static ssize_t pm_qos_latency_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
s32 value;
int ret;
if (kstrtos32(buf, 0, &value))
return -EINVAL;
if (value < 0)
return -EINVAL;
ret = dev_pm_qos_update_request(dev->power.pq_req, value);
return ret < 0 ? ret : n;
}
static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
pm_qos_latency_show, pm_qos_latency_store);
#endif /* CONFIG_PM_RUNTIME */
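From user space the attribute behaves like any other sysfs file; a minimal C sketch (device path invented) that sets a 50 us limit:

#include <stdio.h>

int main(void)
{
	const char *attr =
		"/sys/devices/platform/foo.0/power/pm_qos_resume_latency_us";
	FILE *f = fopen(attr, "w");

	if (!f)
		return 1;
	fprintf(f, "50\n");	/* value in microseconds */
	return fclose(f) ? 1 : 0;
}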
#ifdef CONFIG_PM_SLEEP
@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
static struct attribute *pm_qos_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_qos_attr_group = {
.name = power_group_name,
.attrs = pm_qos_attrs,
};
int dpm_sysfs_add(struct device *dev)
{
int rc;
@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
int pm_qos_sysfs_add(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
}
void pm_qos_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
}
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);

Some files were not shown because too many files have changed in this diff.