Merge tag 'timers-core-2022-12-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
"Updates for timers, timekeeping and drivers:
Core:
- The timer_shutdown[_sync]() infrastructure:
Tearing down timers can be tedious when there are circular
dependencies on other things which need to be torn down as well. A
prime example is the timer and workqueue combination, where the timer
schedules work and the work rearms the timer.
What needs to be prevented is that pending work, which is drained via
destroy_workqueue(), rearms the previously shut down timer.
Nothing in that shutdown sequence relies on the timer being
functional.
The conclusion was that the semantics of timer_shutdown_sync()
should be:
- timer is not enqueued
- timer callback is not running
- timer cannot be rearmed
Preventing the rearming of shutdown timers is done by discarding
rearm attempts silently.
A warning when a rearm attempt on a shut-down timer is detected would
not be really helpful because it is entirely unclear how it should be
acted upon. The only way to address such a case would be to add
'if (in_shutdown)' conditionals all over the place, which is error
prone and in most teardown cases not required at all (see the sketch
below this summary).
- The real fix for the bluetooth HCI teardown based on
timer_shutdown_sync().
A larger scale conversion to timer_shutdown_sync() is work in
progress.
- Consolidation of VDSO time namespace helper functions
- Small fixes for timer and timerqueue
Drivers:
- Prevent an integer overflow in the XGene-1 TVAL register handling
which causes a never-ending interrupt storm.
- The usual set of new device tree bindings
- Small fixes and improvements all over the place"
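To make the new semantics concrete, here is a minimal sketch of the circular
timer/workqueue dependency described above and of a teardown that relies on
timer_shutdown_sync(). The struct and function names (foo, foo_timer_fn,
foo_work_fn, foo_teardown) are invented for illustration and are not taken
from the series; only the timer, workqueue and shutdown APIs are real.

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct foo {
            struct timer_list timer;
            struct work_struct work;
            struct workqueue_struct *wq;
    };

    static void foo_timer_fn(struct timer_list *t)
    {
            struct foo *foo = from_timer(foo, t, timer);

            /* The timer schedules work ... */
            queue_work(foo->wq, &foo->work);
    }

    static void foo_work_fn(struct work_struct *work)
    {
            struct foo *foo = container_of(work, struct foo, work);

            /* ... and the work rearms the timer. */
            mod_timer(&foo->timer, jiffies + HZ);
    }

    static void foo_teardown(struct foo *foo)
    {
            /*
             * After timer_shutdown_sync() returns, the timer is not
             * enqueued, its callback is not running, and any mod_timer()
             * issued by work items drained below is silently discarded
             * instead of rearming the timer.
             */
            timer_shutdown_sync(&foo->timer);
            destroy_workqueue(foo->wq);
    }

Without the shutdown semantics, every rearm site reachable from the drained
work items would need its own "are we shutting down" check.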
* tag 'timers-core-2022-12-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
dt-bindings: timer: renesas,cmt: Add r8a779g0 CMT support
dt-bindings: timer: renesas,tmu: Add r8a779g0 support
clocksource/drivers/arm_arch_timer: Use kstrtobool() instead of strtobool()
clocksource/drivers/timer-ti-dm: Fix missing clk_disable_unprepare in dmtimer_systimer_init_clock()
clocksource/drivers/timer-ti-dm: Clear settings on probe and free
clocksource/drivers/timer-ti-dm: Make timer_get_irq static
clocksource/drivers/timer-ti-dm: Fix warning for omap_timer_match
clocksource/drivers/arm_arch_timer: Fix XGene-1 TVAL register math error
clocksource/drivers/timer-npcm7xx: Enable timer 1 clock before use
dt-bindings: timer: nuvoton,npcm7xx-timer: Allow specifying all clocks
dt-bindings: timer: rockchip: Add rockchip,rk3128-timer
clockevents: Repair kernel-doc for clockevent_delta2ns()
clocksource/drivers/ingenic-ost: Define pm functions properly in platform_driver struct
clocksource/drivers/sh_cmt: Access registers according to spec
vdso/timens: Refactor copy-pasted find_timens_vvar_page() helper into one copy
Bluetooth: hci_qca: Fix the teardown problem for real
timers: Update the documentation to reflect on the new timer_shutdown() API
timers: Provide timer_shutdown[_sync]()
timers: Add shutdown mechanism to the internal functions
timers: Split [try_to_]del_timer[_sync]() to prepare for shutdown mode
...
@@ -1858,7 +1858,7 @@ unloaded. After a given module has been unloaded, any attempt to call
 one of its functions results in a segmentation fault. The module-unload
 functions must therefore cancel any delayed calls to loadable-module
 functions, for example, any outstanding mod_timer() must be dealt
-with via del_timer_sync() or similar.
+with via timer_shutdown_sync() or similar.
 
 Unfortunately, there is no way to cancel an RCU callback; once you
 invoke call_rcu(), the callback function is eventually going to be

@@ -191,7 +191,7 @@ Here is a sample module which implements a basic per cpu counter using
 
 static void __exit test_exit(void)
 {
-	del_timer_sync(&test_timer);
+	timer_shutdown_sync(&test_timer);
 }
 
 module_init(test_init);

@@ -25,7 +25,13 @@ properties:
       - description: The timer interrupt of timer 0
 
   clocks:
-    maxItems: 1
+    items:
+      - description: The reference clock for timer 0
+      - description: The reference clock for timer 1
+      - description: The reference clock for timer 2
+      - description: The reference clock for timer 3
+      - description: The reference clock for timer 4
+    minItems: 1
 
 required:
   - compatible
@@ -102,12 +102,14 @@ properties:
       - enum:
           - renesas,r8a779a0-cmt0 # 32-bit CMT0 on R-Car V3U
           - renesas,r8a779f0-cmt0 # 32-bit CMT0 on R-Car S4-8
+          - renesas,r8a779g0-cmt0 # 32-bit CMT0 on R-Car V4H
       - const: renesas,rcar-gen4-cmt0 # 32-bit CMT0 on R-Car Gen4
 
   - items:
       - enum:
           - renesas,r8a779a0-cmt1 # 48-bit CMT on R-Car V3U
           - renesas,r8a779f0-cmt1 # 48-bit CMT on R-Car S4-8
+          - renesas,r8a779g0-cmt1 # 48-bit CMT on R-Car V4H
       - const: renesas,rcar-gen4-cmt1 # 48-bit CMT on R-Car Gen4
 
   reg:

@@ -38,6 +38,7 @@ properties:
           - renesas,tmu-r8a77995 # R-Car D3
           - renesas,tmu-r8a779a0 # R-Car V3U
           - renesas,tmu-r8a779f0 # R-Car S4-8
+          - renesas,tmu-r8a779g0 # R-Car V4H
       - const: renesas,tmu
 
   reg:

@@ -18,6 +18,7 @@ properties:
       - enum:
           - rockchip,rv1108-timer
           - rockchip,rk3036-timer
+          - rockchip,rk3128-timer
           - rockchip,rk3188-timer
           - rockchip,rk3228-timer
           - rockchip,rk3229-timer
@@ -967,7 +967,7 @@ you might do the following::
 
 	while (list) {
 		struct foo *next = list->next;
-		del_timer(&list->timer);
+		timer_delete(&list->timer);
 		kfree(list);
 		list = next;
 	}
@@ -981,7 +981,7 @@ the lock after we spin_unlock_bh(), and then try to free
 the element (which has already been freed!).
 
 This can be avoided by checking the result of
-del_timer(): if it returns 1, the timer has been deleted.
+timer_delete(): if it returns 1, the timer has been deleted.
 If 0, it means (in this case) that it is currently running, so we can
 do::
 
@@ -990,7 +990,7 @@ do::
 
 	while (list) {
 		struct foo *next = list->next;
-		if (!del_timer(&list->timer)) {
+		if (!timer_delete(&list->timer)) {
 			/* Give timer a chance to delete this */
 			spin_unlock_bh(&list_lock);
 			goto retry;
@@ -1005,9 +1005,12 @@ do::
 Another common problem is deleting timers which restart themselves (by
 calling add_timer() at the end of their timer function).
 Because this is a fairly common case which is prone to races, you should
-use del_timer_sync() (``include/linux/timer.h``) to
-handle this case. It returns the number of times the timer had to be
-deleted before we finally stopped it from adding itself back in.
+use timer_delete_sync() (``include/linux/timer.h``) to handle this case.
+
+Before freeing a timer, timer_shutdown() or timer_shutdown_sync() should be
+called which will keep it from being rearmed. Any subsequent attempt to
+rearm the timer will be silently ignored by the core code.
+
 
 Locking Speed
 =============
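The documentation change above says a timer should be shut down before the
object containing it is freed. As a hedged sketch of what that means in the
simple case where the list is no longer reachable by anyone else during
teardown (so the lock-and-retry dance from the example is not needed; struct
foo and list are the example's own names):

	while (list) {
		struct foo *next = list->next;

		/*
		 * Waits for a running callback to finish and blocks any
		 * further rearm attempt, so freeing is safe afterwards.
		 */
		timer_shutdown_sync(&list->timer);
		kfree(list);
		list = next;
	}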
@@ -1335,7 +1338,7 @@ lock.
 
 - kfree()
 
-- add_timer() and del_timer()
+- add_timer() and timer_delete()
 
 Mutex API reference
 ===================
@@ -118,7 +118,7 @@ existing timer wheel code, as it is mature and well suited. Sharing code
 was not really a win, due to the different data structures. Also, the
 hrtimer functions now have clearer behavior and clearer names - such as
 hrtimer_try_to_cancel() and hrtimer_cancel() [which are roughly
-equivalent to del_timer() and del_timer_sync()] - so there's no direct
+equivalent to timer_delete() and timer_delete_sync()] - so there's no direct
 1:1 mapping between them on the algorithmic level, and thus no real
 potential for code sharing either.
 
@@ -990,7 +990,7 @@ you might do the following::
 
 	while (list) {
 		struct foo *next = list->next;
-		del_timer(&list->timer);
+		timer_delete(&list->timer);
 		kfree(list);
 		list = next;
 	}

@@ -1003,7 +1003,7 @@ and will take the *lock* only after spin_unlock_bh(), and will then try
 to delete its object (which, however, has already been deleted).
 
 This can be avoided by checking the return value of
-del_timer(): if it returns 1, the timer has already been
+timer_delete(): if it returns 1, the timer has already been
 removed. If 0, it means (in this case) that the timer is
 currently running, so we can do the following::
 

@@ -1012,7 +1012,7 @@ currently running, so we can do the following::
 
 	while (list) {
 		struct foo *next = list->next;
-		if (!del_timer(&list->timer)) {
+		if (!timer_delete(&list->timer)) {
 			/* Give timer a chance to delete this */
 			spin_unlock_bh(&list_lock);
 			goto retry;

@@ -1026,10 +1026,8 @@ currently running, so we can do the following::
 Another common problem is deleting timers which restart themselves
 (by calling add_timer() at the end of their execution).
 Since this is a fairly common problem with a propensity
-for race conditions, you should use del_timer_sync()
-(``include/linux/timer.h``) to handle this case. It returns the
-number of times the timer had to be stopped before we were
-finally able to stop it without it rearming itself.
+for race conditions, you should use timer_delete_sync()
+(``include/linux/timer.h``) to handle this case.
 
 Locking Speed
 =============

@@ -1374,7 +1372,7 @@ context, or while holding any *lock*.
 
 - kfree()
 
-- add_timer() and del_timer()
+- add_timer() and timer_delete()
 
 Mutex API reference
 ===================
@@ -185,7 +185,7 @@ UP之间没有不同的行为,在你的架构的 ``local.h`` 中包括 ``asm-g
 
 static void __exit test_exit(void)
 {
-	del_timer_sync(&test_timer);
+	timer_shutdown_sync(&test_timer);
 }
 
 module_init(test_init);
@@ -90,7 +90,7 @@ static void __init spear_clocksource_init(void)
 			      200, 16, clocksource_mmio_readw_up);
 }
 
-static inline void timer_shutdown(struct clock_event_device *evt)
+static inline void spear_timer_shutdown(struct clock_event_device *evt)
 {
 	u16 val = readw(gpt_base + CR(CLKEVT));
 

@@ -101,7 +101,7 @@ static inline void timer_shutdown(struct clock_event_device *evt)
 
 static int spear_shutdown(struct clock_event_device *evt)
 {
-	timer_shutdown(evt);
+	spear_timer_shutdown(evt);
 
 	return 0;
 }

@@ -111,7 +111,7 @@ static int spear_set_oneshot(struct clock_event_device *evt)
 	u16 val;
 
 	/* stop the timer */
-	timer_shutdown(evt);
+	spear_timer_shutdown(evt);
 
 	val = readw(gpt_base + CR(CLKEVT));
 	val |= CTRL_ONE_SHOT;

@@ -126,7 +126,7 @@ static int spear_set_periodic(struct clock_event_device *evt)
 	u16 val;
 
 	/* stop the timer */
-	timer_shutdown(evt);
+	spear_timer_shutdown(evt);
 
 	period = clk_get_rate(gpt_clk) / HZ;
 	period >>= CTRL_PRESCALER16;
@@ -151,28 +151,6 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	mmap_read_unlock(mm);
 	return 0;
 }
 
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_mm == current->mm))
-		return current->nsproxy->time_ns->vvar_page;
-
-	/*
-	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
-	 * through interfaces like /proc/$pid/mem or
-	 * process_vm_{readv,writev}() as long as there's no .access()
-	 * in special_mapping_vmops.
-	 * For more details check_vma_flags() and __access_remote_vm()
-	 */
-	WARN(1, "vvar_page accessed remotely");
-
-	return NULL;
-}
-#else
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	return NULL;
-}
-#endif
 
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
@@ -129,28 +129,6 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 
 	return 0;
 }
 
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_mm == current->mm))
-		return current->nsproxy->time_ns->vvar_page;
-
-	/*
-	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
-	 * through interfaces like /proc/$pid/mem or
-	 * process_vm_{readv,writev}() as long as there's no .access()
-	 * in special_mapping_vmops.
-	 * For more details check_vma_flags() and __access_remote_vm()
-	 */
-	WARN(1, "vvar_page accessed remotely");
-
-	return NULL;
-}
-#else
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	return NULL;
-}
-#endif
 
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
@@ -137,28 +137,6 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	mmap_read_unlock(mm);
 	return 0;
 }
 
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_mm == current->mm))
-		return current->nsproxy->time_ns->vvar_page;
-
-	/*
-	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
-	 * through interfaces like /proc/$pid/mem or
-	 * process_vm_{readv,writev}() as long as there's no .access()
-	 * in special_mapping_vmops.
-	 * For more details check_vma_flags() and __access_remote_vm()
-	 */
-	WARN(1, "vvar_page accessed remotely");
-
-	return NULL;
-}
-#else
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	return NULL;
-}
-#endif
 
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
@@ -44,21 +44,6 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
 	return (struct vdso_data *)(vvar_page);
 }
 
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_mm == current->mm))
-		return current->nsproxy->time_ns->vvar_page;
-	/*
-	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
-	 * through interfaces like /proc/$pid/mem or
-	 * process_vm_{readv,writev}() as long as there's no .access()
-	 * in special_mapping_vmops().
-	 * For more details check_vma_flags() and __access_remote_vm()
-	 */
-	WARN(1, "vvar_page accessed remotely");
-	return NULL;
-}
-
 /*
  * The VVAR page layout depends on whether a task belongs to the root or
  * non-root time namespace. Whenever a task changes its namespace, the VVAR

@@ -84,11 +69,6 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	mmap_read_unlock(mm);
 	return 0;
 }
-#else
-static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	return NULL;
-}
 #endif
 
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
@@ -98,24 +98,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
 }
 
 #ifdef CONFIG_TIME_NS
-static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	if (likely(vma->vm_mm == current->mm))
-		return current->nsproxy->time_ns->vvar_page;
-
-	/*
-	 * VM_PFNMAP | VM_IO protect .fault() handler from being called
-	 * through interfaces like /proc/$pid/mem or
-	 * process_vm_{readv,writev}() as long as there's no .access()
-	 * in special_mapping_vmops().
-	 * For more details check_vma_flags() and __access_remote_vm()
-	 */
-
-	WARN(1, "vvar_page accessed remotely");
-
-	return NULL;
-}
-
 /*
  * The vvar page layout depends on whether a task belongs to the root or
  * non-root time namespace. Whenever a task changes its namespace, the VVAR

@@ -140,11 +122,6 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 
 	return 0;
 }
-#else
-static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
-{
-	return NULL;
-}
 #endif
 
 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
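All of the per-architecture find_timens_vvar_page() copies removed above are
functionally identical, which is what the "Refactor copy-pasted
find_timens_vvar_page() helper into one copy" entry in the shortlog refers to.
As a rough sketch, reconstructed from the removed code, the single shared
helper looks like the following; its exact location and surrounding guards in
the actual patch may differ:

	#ifdef CONFIG_TIME_NS
	static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
	{
		if (likely(vma->vm_mm == current->mm))
			return current->nsproxy->time_ns->vvar_page;

		/*
		 * VM_PFNMAP | VM_IO protect .fault() handler from being called
		 * through interfaces like /proc/$pid/mem or
		 * process_vm_{readv,writev}() as long as there's no .access()
		 * in special_mapping_vmops.
		 */
		WARN(1, "vvar_page accessed remotely");
		return NULL;
	}
	#else
	static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
	{
		return NULL;
	}
	#endif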
@@ -696,9 +696,15 @@ static int qca_close(struct hci_uart *hu)
 	skb_queue_purge(&qca->tx_wait_q);
 	skb_queue_purge(&qca->txq);
 	skb_queue_purge(&qca->rx_memdump_q);
+	/*
+	 * Shut the timers down so they can't be rearmed when
+	 * destroy_workqueue() drains pending work which in turn might try
+	 * to arm a timer. After shutdown rearm attempts are silently
+	 * ignored by the timer core code.
+	 */
+	timer_shutdown_sync(&qca->tx_idle_timer);
+	timer_shutdown_sync(&qca->wake_retrans_timer);
 	destroy_workqueue(qca->workqueue);
-	del_timer_sync(&qca->tx_idle_timer);
-	del_timer_sync(&qca->wake_retrans_timer);
 	qca->hu = NULL;
 
 	kfree_skb(qca->rx_skb);
@@ -155,7 +155,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
 out:
 	if (!priv->response_length) {
 		*off = 0;
-		del_singleshot_timer_sync(&priv->user_read_timer);
+		del_timer_sync(&priv->user_read_timer);
 		flush_work(&priv->timeout_work);
 	}
 	mutex_unlock(&priv->buffer_mutex);

@@ -262,7 +262,7 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
 void tpm_common_release(struct file *file, struct file_priv *priv)
 {
 	flush_work(&priv->async_work);
-	del_singleshot_timer_sync(&priv->user_read_timer);
+	del_timer_sync(&priv->user_read_timer);
 	flush_work(&priv->timeout_work);
 	file->private_data = NULL;
 	priv->response_length = 0;
@@ -18,6 +18,7 @@
 #include <linux/clocksource.h>
 #include <linux/clocksource_ids.h>
 #include <linux/interrupt.h>
+#include <linux/kstrtox.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/io.h>

@@ -97,7 +98,7 @@ static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EV
 
 static int __init early_evtstrm_cfg(char *buf)
 {
-	return strtobool(buf, &evtstrm_enable);
+	return kstrtobool(buf, &evtstrm_enable);
 }
 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
 

@@ -687,8 +688,8 @@ static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
 	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
 }
 
-static __always_inline int timer_shutdown(const int access,
-					  struct clock_event_device *clk)
+static __always_inline int arch_timer_shutdown(const int access,
+					       struct clock_event_device *clk)
 {
 	unsigned long ctrl;
 

@@ -701,22 +702,22 @@ static __always_inline int timer_shutdown(const int access,
 
 static int arch_timer_shutdown_virt(struct clock_event_device *clk)
 {
-	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
+	return arch_timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
 }
 
 static int arch_timer_shutdown_phys(struct clock_event_device *clk)
 {
-	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
+	return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
 }
 
 static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
 {
-	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
+	return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
 }
 
 static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
 {
-	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
+	return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
 }
 
 static __always_inline void set_next_event(const int access, unsigned long evt,
@@ -141,7 +141,7 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int __maybe_unused ingenic_ost_suspend(struct device *dev)
+static int ingenic_ost_suspend(struct device *dev)
 {
 	struct ingenic_ost *ost = dev_get_drvdata(dev);
 

@@ -150,14 +150,14 @@ static int __maybe_unused ingenic_ost_suspend(struct device *dev)
 	return 0;
 }
 
-static int __maybe_unused ingenic_ost_resume(struct device *dev)
+static int ingenic_ost_resume(struct device *dev)
 {
 	struct ingenic_ost *ost = dev_get_drvdata(dev);
 
 	return clk_enable(ost->clk);
 }
 
-static const struct dev_pm_ops __maybe_unused ingenic_ost_pm_ops = {
+static const struct dev_pm_ops ingenic_ost_pm_ops = {
 	/* _noirq: We want the OST clock to be gated last / ungated first */
 	.suspend_noirq = ingenic_ost_suspend,
 	.resume_noirq = ingenic_ost_resume,

@@ -181,9 +181,7 @@ static const struct of_device_id ingenic_ost_of_match[] = {
 static struct platform_driver ingenic_ost_driver = {
 	.driver = {
 		.name = "ingenic-ost",
-#ifdef CONFIG_PM_SUSPEND
-		.pm = &ingenic_ost_pm_ops,
-#endif
+		.pm = pm_sleep_ptr(&ingenic_ost_pm_ops),
 		.of_match_table = ingenic_ost_of_match,
 	},
 };
Some files were not shown because too many files have changed in this diff.