2007-05-08 00:27:59 -07:00
/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */
2012-09-04 15:12:07 -04:00
#include <linux/timekeeper_internal.h>
2007-05-08 00:27:59 -07:00
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
2009-10-07 17:09:06 +04:00
#include <linux/sched.h>
2011-03-23 22:16:04 +01:00
#include <linux/syscore_ops.h>
2007-05-08 00:27:59 -07:00
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
2009-08-14 15:47:30 +02:00
#include <linux/stop_machine.h>
2012-11-27 23:28:59 -02:00
#include <linux/pvclock_gtod.h>
2014-04-07 15:39:20 -07:00
#include <linux/compiler.h>
2007-05-08 00:27:59 -07:00
2013-02-21 22:51:36 +00:00
#include "tick-internal.h"
2013-03-22 11:31:29 -07:00
#include "ntp_internal.h"
2013-05-21 22:32:14 -07:00
#include "timekeeping_internal.h"
2009-08-14 15:47:26 +02:00
2013-06-27 11:35:45 +01:00
#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
2013-06-27 11:35:46 +01:00
#define TK_CLOCK_WAS_SET	(1 << 2)
2013-06-27 11:35:45 +01:00
2014-07-16 21:04:07 +00:00
/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;
2013-02-21 22:51:38 +00:00
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
2013-02-21 22:51:40 +00:00
static struct timekeeper shadow_timekeeper;
2009-08-14 15:47:26 +02:00
2014-07-16 21:05:23 +00:00
/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
2011-11-14 11:46:39 -08:00
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
2013-01-16 00:09:47 +08:00
/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;
2012-07-13 01:21:53 -04:00
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
2014-07-16 21:05:16 +00:00
	while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
		tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
2012-07-13 01:21:53 -04:00
		tk->xtime_sec++;
	}
}
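
/*
 * Illustrative sketch, not part of the original file: xtime_nsec stores
 * "shifted nanoseconds" (ns << tkr.shift), so the sub-nanosecond remainder
 * of the cycles->ns conversion is carried between updates instead of being
 * truncated away. The hypothetical helper below applies the same
 * normalization as tk_normalize_xtime() to plain local values, assuming a
 * shift of 8 purely for the sake of the example.
 */
static inline u64 example_normalize_shifted_ns(u64 xtime_nsec, u64 *xtime_sec)
{
	const u32 shift = 8;				/* assumed value, for illustration */
	const u64 sec = (u64)NSEC_PER_SEC << shift;	/* one second, in shifted ns */

	while (xtime_nsec >= sec) {
		xtime_nsec -= sec;
		(*xtime_sec)++;
	}
	return xtime_nsec;
}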
2011-11-14 11:46:39 -08:00
2014-07-16 21:04:05 +00:00
static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
2014-07-16 21:05:16 +00:00
	ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
2014-07-16 21:04:05 +00:00
	return ts;
}

2014-07-16 21:04:01 +00:00
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
2012-07-13 01:21:53 -04:00
{
	tk->xtime_sec = ts->tv_sec;
2014-07-16 21:05:16 +00:00
	tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
2012-07-13 01:21:53 -04:00
}

2014-07-16 21:04:01 +00:00
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
2012-07-13 01:21:53 -04:00
{
	tk->xtime_sec += ts->tv_sec;
2014-07-16 21:05:16 +00:00
	tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
2012-08-21 20:30:46 -04:00
	tk_normalize_xtime(tk);
2012-07-13 01:21:53 -04:00
}
2011-11-14 11:46:39 -08:00
2014-07-16 21:04:01 +00:00
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
2012-07-27 14:48:12 -04:00
{
2014-07-16 21:04:01 +00:00
	struct timespec64 tmp;

2012-07-27 14:48:12 -04:00
	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
2014-07-16 21:04:01 +00:00
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
2012-07-27 14:48:12 -04:00
					-tk->wall_to_monotonic.tv_nsec);
2014-07-16 21:04:01 +00:00
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
2012-07-27 14:48:12 -04:00
	tk->wall_to_monotonic = wtm;
2014-07-16 21:04:01 +00:00
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
2013-12-10 17:13:35 -08:00
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
2012-07-27 14:48:12 -04:00
}

2014-07-16 21:05:00 +00:00
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
2012-07-27 14:48:12 -04:00
{
2014-07-16 21:05:00 +00:00
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
2012-07-27 14:48:12 -04:00
}
2015-03-11 21:16:32 -07:00
#ifdef CONFIG_DEBUG_TIMEKEEPING
static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
	cycle_t max_cycles = tk->tkr.clock->max_cycles;
	const char *name = tk->tkr.clock->name;

	if (offset > max_cycles) {
2015-03-11 21:16:33 -07:00
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
2015-03-11 21:16:32 -07:00
				offset, name, max_cycles);
2015-03-11 21:16:33 -07:00
		printk_deferred("timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
2015-03-11 21:16:32 -07:00
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}
}
2015-03-11 21:16:33 -07:00
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	/* Cap delta value to the max_cycles value to avoid mult overflows */
	if (unlikely(delta > tkr->clock->max_cycles))
		delta = tkr->clock->max_cycles;

	return delta;
}
2015-03-11 21:16:32 -07:00
#else
static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
}
2015-03-11 21:16:33 -07:00
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
2015-03-11 21:16:32 -07:00
#endif
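
/*
 * Worked example (illustrative numbers, not from the source): the cap above
 * exists because timekeeping_get_ns() computes delta * tkr->mult in 64 bits.
 * With a hypothetical mult of 2^27, that product overflows once delta
 * exceeds 2^64 / 2^27 = 2^37 cycles; for a 10 MHz counter that is roughly
 * 3.8 hours without an update_wall_time() call. max_cycles is the
 * clocksource's precomputed version of that bound.
 */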
2009-08-14 15:47:26 +02:00
/**
2013-11-28 16:28:55 +08:00
 * tk_setup_internals - Set up internals to use clocksource clock.
2009-08-14 15:47:26 +02:00
 *
2013-11-28 16:28:55 +08:00
 * @tk:		The target timekeeper to setup.
2009-08-14 15:47:26 +02:00
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
2012-07-13 01:21:57 -04:00
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
2009-08-14 15:47:26 +02:00
{
	cycle_t interval;
2010-10-20 15:55:15 -07:00
	u64 tmp, ntpinterval;
2012-07-13 01:21:53 -04:00
	struct clocksource *old_clock;
2009-08-14 15:47:26 +02:00
2014-07-16 21:05:16 +00:00
	old_clock = tk->tkr.clock;
	tk->tkr.clock = clock;
	tk->tkr.read = clock->read;
	tk->tkr.mask = clock->mask;
	tk->tkr.cycle_last = tk->tkr.read(clock);
2009-08-14 15:47:26 +02:00

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
2010-10-20 15:55:15 -07:00
	ntpinterval = tmp;
2009-08-14 15:47:28 +02:00
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
2009-08-14 15:47:26 +02:00
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
2012-07-13 01:21:57 -04:00
	tk->cycle_interval = interval;
2009-08-14 15:47:26 +02:00

	/* Go back from cycles -> shifted ns */
2012-07-13 01:21:57 -04:00
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
2009-08-14 15:47:28 +02:00
		((u64) interval * clock->mult) >> clock->shift;
2009-08-14 15:47:26 +02:00
2012-07-13 01:21:53 -04:00
	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;

		if (shift_change < 0)
2014-07-16 21:05:16 +00:00
			tk->tkr.xtime_nsec >>= -shift_change;
2012-07-13 01:21:53 -04:00
		else
2014-07-16 21:05:16 +00:00
			tk->tkr.xtime_nsec <<= shift_change;
2012-07-13 01:21:53 -04:00
	}
2014-07-16 21:05:16 +00:00
	tk->tkr.shift = clock->shift;
2009-08-14 15:47:26 +02:00
2012-07-13 01:21:57 -04:00
	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
2014-04-23 20:53:29 -07:00
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
2009-08-14 15:47:28 +02:00

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
2014-07-16 21:05:16 +00:00
	tk->tkr.mult = clock->mult;
2013-12-06 17:25:21 -08:00
	tk->ntp_err_mult = 0;
2009-08-14 15:47:26 +02:00
}
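
/*
 * Worked example (illustrative numbers, not from the source): consider a
 * hypothetical 10 MHz clocksource registered with mult = 100 << 20 and
 * shift = 20, so that cycles * mult >> shift equals cycles * 100ns, and an
 * NTP interval of 10ms (NTP_INTERVAL_LENGTH = 10000000). Then:
 *
 *	tmp             = 10000000 << 20;          ns -> shifted ns
 *	tmp             = (tmp + mult/2) / mult;   = 100000 cycles
 *	cycle_interval  = 100000;                  one NTP tick, in cycles
 *	xtime_interval  = 100000 * mult;           one NTP tick, in shifted ns
 *	xtime_remainder = ntpinterval - xtime_interval;
 *
 * Here the division is exact, so xtime_remainder is 0; for clock frequencies
 * that do not divide the NTP interval evenly it holds the rounding residue,
 * which feeds the ntp_error accounting so the rounding never makes the clock
 * systematically fast or slow.
 */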
2007-05-08 00:27:59 -07:00
2009-08-14 15:47:29 +02:00
/* Timekeeper helper functions. */
2012-11-07 17:58:54 -07:00
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2014-07-16 21:03:50 +00:00
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
2012-11-07 17:58:54 -07:00
#else
2014-07-16 21:03:50 +00:00
static inline u32 arch_gettimeoffset(void) { return 0; }
2012-11-07 17:58:54 -07:00
#endif

2014-07-16 21:05:18 +00:00
static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
2009-08-14 15:47:29 +02:00
{
2015-03-11 21:16:33 -07:00
	cycle_t delta;
2012-07-13 01:21:53 -04:00
	s64 nsec;
2009-08-14 15:47:29 +02:00
2015-03-11 21:16:33 -07:00
	delta = timekeeping_get_delta(tkr);
2009-08-14 15:47:29 +02:00
2014-07-16 21:05:18 +00:00
	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;
2012-07-13 01:21:55 -04:00
2012-11-07 17:58:54 -07:00
	/* If arch requires, add in get_arch_timeoffset() */
2014-07-16 21:03:50 +00:00
	return nsec + arch_gettimeoffset();
2009-08-14 15:47:29 +02:00
}
2012-07-13 01:21:57 -04:00
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
2009-08-14 15:47:29 +02:00
{
2014-07-16 21:05:16 +00:00
	struct clocksource *clock = tk->tkr.clock;
2015-03-11 21:16:33 -07:00
	cycle_t delta;
2012-07-13 01:21:55 -04:00
	s64 nsec;
2009-08-14 15:47:29 +02:00
2015-03-11 21:16:33 -07:00
	delta = timekeeping_get_delta(&tk->tkr);
2009-08-14 15:47:29 +02:00
2012-07-13 01:21:55 -04:00
	/* convert delta to nanoseconds. */
2014-07-16 21:05:10 +00:00
	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
2012-07-13 01:21:55 -04:00
2012-11-07 17:58:54 -07:00
	/* If arch requires, add in get_arch_timeoffset() */
2014-07-16 21:03:50 +00:00
	return nsec + arch_gettimeoffset();
2009-08-14 15:47:29 +02:00
}
2014-07-16 21:05:23 +00:00
/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
2015-02-11 05:01:52 +01:00
 * @tkr: Timekeeping readout base from which we take the update
2014-07-16 21:05:23 +00:00
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * So we handle this differently than the other timekeeping accessor
 * functions which retry when the sequence count has changed. The
 * update side does:
 *
 * smp_wmb();	<- Ensure that the last base[1] update is visible
 * tkf->seq++;
 * smp_wmb();	<- Ensure that the seqcount update is visible
2015-02-11 05:01:52 +01:00
 * update(tkf->base[0], tkr);
2014-07-16 21:05:23 +00:00
 * smp_wmb();	<- Ensure that the base[0] update is visible
 * tkf->seq++;
 * smp_wmb();	<- Ensure that the seqcount update is visible
2015-02-11 05:01:52 +01:00
 * update(tkf->base[1], tkr);
2014-07-16 21:05:23 +00:00
 *
 * The reader side does:
 *
 * do {
 *	seq = tkf->seq;
 *	smp_rmb();
 *	idx = seq & 0x01;
 *	now = now(tkf->base[idx]);
 *	smp_rmb();
 * } while (seq != tkf->seq)
 *
 * As long as we update base[0] readers are forced off to
 * base[1]. Once base[0] is updated readers are redirected to base[0]
 * and the base[1] update takes place.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
2015-02-11 05:01:52 +01:00
static void update_fast_timekeeper(struct tk_read_base *tkr)
2014-07-16 21:05:23 +00:00
{
	struct tk_read_base *base = tk_fast_mono.base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tk_fast_mono.seq);

	/* Update base[0] */
2015-02-11 05:01:52 +01:00
	memcpy(base, tkr, sizeof(*base));
2014-07-16 21:05:23 +00:00

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tk_fast_mono.seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
u64 notrace ktime_get_mono_fast_ns(void)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount(&tk_fast_mono.seq);
		tkr = tk_fast_mono.base + (seq & 0x01);
		now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);
	} while (read_seqcount_retry(&tk_fast_mono.seq, seq));

	return now;
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
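
/*
 * Usage sketch (hypothetical caller, not part of this file): the fast
 * accessor is usable where ktime_get() is not, e.g. from NMI context, at
 * the cost of the small non-monotonic step across an update described above.
 */
static void example_nmi_timestamp(void)
{
	/* Safe here: a concurrent update moves readers to the other
	 * tk_read_base slot instead of making them wait. */
	u64 ts = ktime_get_mono_fast_ns();

	(void)ts;	/* stamp a hypothetical trace record with ts */
}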
2015-02-13 14:49:02 +01:00
/* Suspend-time cycles value for halted fast timekeeper. */
static cycle_t cycles_at_suspend;

static cycle_t dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tkr->read(tkr->clock);
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy);
}
2014-07-16 21:04:05 +00:00
#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void update_vsyscall(struct timekeeper *tk)
{
2014-08-13 12:47:14 -07:00
	struct timespec xt, wm;
2014-07-16 21:04:05 +00:00
2014-07-23 14:35:39 -07:00
	xt = timespec64_to_timespec(tk_xtime(tk));
2014-08-13 12:47:14 -07:00
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
2014-07-16 21:05:16 +00:00
			    tk->tkr.cycle_last);
2014-07-16 21:04:05 +00:00
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
2014-07-16 21:05:16 +00:00
	remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
	tk->tkr.xtime_nsec -= remainder;
	tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
2014-07-16 21:04:05 +00:00
	tk->ntp_error += remainder << tk->ntp_error_shift;
2014-07-16 21:05:16 +00:00
	tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
2014-07-16 21:04:05 +00:00
}
#else
#define old_vsyscall_fixup(tk)
#endif
2012-11-27 23:28:59 -02:00
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

2013-06-27 11:35:46 +01:00
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
2012-11-27 23:28:59 -02:00
{
2013-06-27 11:35:46 +01:00
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
2012-11-27 23:28:59 -02:00
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-11-27 23:28:59 -02:00
	unsigned long flags;
	int ret;

2013-02-21 22:51:38 +00:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2012-11-27 23:28:59 -02:00
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
2013-06-27 11:35:46 +01:00
	update_pvclock_gtod(tk, true);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2012-11-27 23:28:59 -02:00

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

2013-02-21 22:51:38 +00:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2012-11-27 23:28:59 -02:00
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2012-11-27 23:28:59 -02:00

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
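
/*
 * Usage sketch (hypothetical consumer, not part of this file): a paravirt
 * clock implementation subscribes to timekeeping changes by registering a
 * notifier_block; the callback runs under timekeeper_lock with the updated
 * struct timekeeper as its argument.
 */
static int example_pvclock_notify(struct notifier_block *nb,
				  unsigned long was_set, void *priv)
{
	struct timekeeper *tk = priv;

	/* propagate tk's new parameters to a hypothetical guest copy */
	(void)tk;
	return NOTIFY_OK;
}

static struct notifier_block example_pvclock_nb = {
	.notifier_call = example_pvclock_notify,
};
/* registered with: pvclock_gtod_register_notifier(&example_pvclock_nb); */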
2014-07-16 21:04:10 +00:00
/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
2014-10-29 16:01:16 +05:30
	u64 seconds;
	u32 nsec;

2014-07-16 21:04:10 +00:00
	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
2014-10-29 16:01:16 +05:30
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr.base_mono = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
2014-07-16 21:05:04 +00:00

	/* Update the monotonic raw base */
	tk->base_raw = timespec64_to_ktime(tk->raw_time);
2014-10-29 16:01:16 +05:30

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater than or equal to one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr.xtime_nsec >> tk->tkr.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;
2014-07-16 21:04:10 +00:00
}
2013-02-21 22:51:38 +00:00
/* must hold timekeeper_lock */
2013-06-27 11:35:45 +01:00
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
2011-11-13 23:19:49 +00:00
{
2013-06-27 11:35:45 +01:00
	if (action & TK_CLEAR_NTP) {
2012-07-13 01:21:57 -04:00
		tk->ntp_error = 0;
2011-11-13 23:19:49 +00:00
		ntp_clear();
	}
2013-02-21 22:51:40 +00:00
2014-07-16 21:04:10 +00:00
	tk_update_ktime_data(tk);
2014-09-06 12:24:49 +02:00

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
2013-06-27 11:35:45 +01:00

	if (action & TK_MIRROR)
2014-07-16 21:04:07 +00:00
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
2014-07-16 21:05:23 +00:00
2015-02-11 05:01:52 +01:00
	update_fast_timekeeper(&tk->tkr);
2011-11-13 23:19:49 +00:00
}
2007-05-08 00:27:59 -07:00
/**
2009-08-14 15:47:26 +02:00
 * timekeeping_forward_now - update clock to the current time
2007-05-08 00:27:59 -07:00
 *
2008-08-20 16:37:28 -07:00
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
2007-05-08 00:27:59 -07:00
 */
2012-07-13 01:21:57 -04:00
static void timekeeping_forward_now(struct timekeeper *tk)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:05:16 +00:00
	struct clocksource *clock = tk->tkr.clock;
2014-07-16 21:05:10 +00:00
	cycle_t cycle_now, delta;
2008-08-20 16:37:28 -07:00
	s64 nsec;
2007-05-08 00:27:59 -07:00
2014-07-16 21:05:16 +00:00
	cycle_now = tk->tkr.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
	tk->tkr.cycle_last = cycle_now;
2007-05-08 00:27:59 -07:00
2014-07-16 21:05:16 +00:00
	tk->tkr.xtime_nsec += delta * tk->tkr.mult;
2009-05-01 13:10:26 -07:00
2012-11-07 17:58:54 -07:00
	/* If arch requires, add in get_arch_timeoffset() */
2014-07-16 21:05:16 +00:00
	tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;
2009-05-01 13:10:26 -07:00
2012-07-13 01:21:57 -04:00
	tk_normalize_xtime(tk);
2008-08-20 16:37:30 -07:00
2014-07-16 21:05:10 +00:00
	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
2014-07-16 21:04:01 +00:00
	timespec64_add_ns(&tk->raw_time, nsec);
2007-05-08 00:27:59 -07:00
}
/**
2014-07-16 21:04:04 +00:00
 * __getnstimeofday64 - Returns the time of day in a timespec64.
2007-05-08 00:27:59 -07:00
 * @ts:		pointer to the timespec64 to be set
 *
2012-11-19 10:26:16 -08:00
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
2007-05-08 00:27:59 -07:00
 */
2014-07-16 21:04:04 +00:00
int __getnstimeofday64(struct timespec64 *ts)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2007-05-08 00:27:59 -07:00
	unsigned long seq;
2012-07-13 01:21:53 -04:00
	s64 nsecs = 0;
2007-05-08 00:27:59 -07:00

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2007-05-08 00:27:59 -07:00
2012-07-27 14:48:13 -04:00
		ts->tv_sec = tk->xtime_sec;
2014-07-16 21:05:18 +00:00
		nsecs = timekeeping_get_ns(&tk->tkr);
2007-05-08 00:27:59 -07:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2007-05-08 00:27:59 -07:00
2012-09-11 19:26:03 -04:00
	ts->tv_nsec = 0;
2014-07-16 21:04:04 +00:00
	timespec64_add_ns(ts, nsecs);
2012-11-19 10:26:16 -08:00

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
2014-07-16 21:04:04 +00:00
EXPORT_SYMBOL(__getnstimeofday64);
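
/*
 * Usage sketch (hypothetical caller, not part of this file): unlike
 * getnstimeofday64(), the __ variant reports suspension instead of
 * WARNing, so a caller that may run during suspend checks the result.
 */
static int example_read_wall_clock(struct timespec64 *ts)
{
	return __getnstimeofday64(ts);	/* -EAGAIN while timekeeping is suspended */
}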
2012-11-19 10:26:16 -08:00
/**
2014-07-16 21:04:04 +00:00
 * getnstimeofday64 - Returns the time of day in a timespec64.
2014-11-07 13:13:04 -08:00
 * @ts:		pointer to the timespec64 to be set
2012-11-19 10:26:16 -08:00
 *
2014-11-07 13:13:04 -08:00
 * Returns the time of day in a timespec64 (WARN if suspended).
2012-11-19 10:26:16 -08:00
 */
2014-07-16 21:04:04 +00:00
void getnstimeofday64(struct timespec64 *ts)
2012-11-19 10:26:16 -08:00
{
2014-07-16 21:04:04 +00:00
	WARN_ON(__getnstimeofday64(ts));
2007-05-08 00:27:59 -07:00
}
2014-07-16 21:04:04 +00:00
EXPORT_SYMBOL(getnstimeofday64);
2007-05-08 00:27:59 -07:00
2009-07-07 11:27:28 +02:00
ktime_t ktime_get(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2009-07-07 11:27:28 +02:00
	unsigned int seq;
2014-07-16 21:04:12 +00:00
	ktime_t base;
	s64 nsecs;
2009-07-07 11:27:28 +02:00

	WARN_ON(timekeeping_suspended);

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2014-07-16 21:05:16 +00:00
		base = tk->tkr.base_mono;
2014-07-16 21:05:18 +00:00
		nsecs = timekeeping_get_ns(&tk->tkr);
2009-07-07 11:27:28 +02:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2014-07-16 21:03:53 +00:00
2014-07-16 21:04:12 +00:00
	return ktime_add_ns(base, nsecs);
2009-07-07 11:27:28 +02:00
}
EXPORT_SYMBOL_GPL(ktime_get);
2014-07-16 21:04:13 +00:00
static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
2014-07-16 21:05:16 +00:00
		base = ktime_add(tk->tkr.base_mono, *offset);
2014-07-16 21:05:18 +00:00
		nsecs = timekeeping_get_ns(&tk->tkr);
2014-07-16 21:04:13 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);
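
/*
 * Illustrative identity (not from the source): all three offset clocks are
 * derived from the same monotonic base,
 *
 *	CLOCK_REALTIME = base_mono + offs_real + now_ns
 *	CLOCK_BOOTTIME = base_mono + offs_boot + now_ns
 *	CLOCK_TAI      = base_mono + offs_tai  + now_ns
 *
 * so a reader such as ktime_get_with_offset(TK_OFFS_REAL) never has to
 * sample more than one hardware counter value per readout.
 */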
2014-07-16 21:04:22 +00:00
/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
2014-07-16 21:05:04 +00:00
/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->base_raw;
		nsecs = timekeeping_get_ns_raw(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);
2009-07-07 11:27:28 +02:00
/**
2014-07-16 21:04:04 +00:00
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
2009-07-07 11:27:28 +02:00
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
2014-11-07 13:13:04 -08:00
 * in normalized timespec64 format in the variable pointed to by @ts.
2009-07-07 11:27:28 +02:00
 */
2014-07-16 21:04:04 +00:00
void ktime_get_ts64(struct timespec64 *ts)
2009-07-07 11:27:28 +02:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:04:04 +00:00
	struct timespec64 tomono;
2012-09-11 19:26:03 -04:00
	s64 nsec;
2009-07-07 11:27:28 +02:00
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2014-07-16 21:04:04 +00:00
		ts->tv_sec = tk->xtime_sec;
2014-07-16 21:05:18 +00:00
		nsec = timekeeping_get_ns(&tk->tkr);
2012-07-27 14:48:13 -04:00
		tomono = tk->wall_to_monotonic;
2009-07-07 11:27:28 +02:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2009-07-07 11:27:28 +02:00
2014-07-16 21:04:04 +00:00
	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
2009-07-07 11:27:28 +02:00
}
2014-07-16 21:04:04 +00:00
EXPORT_SYMBOL_GPL(ktime_get_ts64);
2009-07-07 11:27:28 +02:00
2014-10-29 16:01:16 +05:30
/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single
 * non-serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);
2014-10-29 16:01:50 +05:30
/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
2011-01-12 17:00:57 -08:00
#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2011-01-12 17:00:57 -08:00
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2011-01-12 17:00:57 -08:00
2014-07-16 21:04:01 +00:00
		*ts_raw = timespec64_to_timespec(tk->raw_time);
2012-07-27 14:48:13 -04:00
		ts_real->tv_sec = tk->xtime_sec;
2012-07-13 01:21:53 -04:00
		ts_real->tv_nsec = 0;
2011-01-12 17:00:57 -08:00
2012-07-27 14:48:13 -04:00
		nsecs_raw = timekeeping_get_ns_raw(tk);
2014-07-16 21:05:18 +00:00
		nsecs_real = timekeeping_get_ns(&tk->tkr);
2011-01-12 17:00:57 -08:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2011-01-12 17:00:57 -08:00

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);
#endif /* CONFIG_NTP_PPS */
2007-05-08 00:27:59 -07:00
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
2008-01-30 13:30:01 +01:00
 * NOTE: Users should be converted to using getnstimeofday()
2007-05-08 00:27:59 -07:00
 */
void do_gettimeofday(struct timeval *tv)
{
2014-07-16 21:04:04 +00:00
	struct timespec64 now;
2007-05-08 00:27:59 -07:00
2014-07-16 21:04:04 +00:00
	getnstimeofday64(&now);
2007-05-08 00:27:59 -07:00
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
2012-04-27 10:12:42 +02:00
2007-05-08 00:27:59 -07:00
/**
2014-11-18 19:15:16 +08:00
 * do_settimeofday64 - Sets the time of day.
 * @ts:		pointer to the timespec64 variable containing the new time
2007-05-08 00:27:59 -07:00
 *
 * Sets the time of day to the new time and updates NTP and notifies hrtimers
 */
2014-11-18 19:15:16 +08:00
int do_settimeofday64(const struct timespec64 *ts)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-11-18 19:15:16 +08:00
	struct timespec64 ts_delta, xt;
2011-11-14 14:05:44 -08:00
	unsigned long flags;
2007-05-08 00:27:59 -07:00
2014-11-18 19:15:16 +08:00
	if (!timespec64_valid_strict(ts))
2007-05-08 00:27:59 -07:00
		return -EINVAL;
2013-02-21 22:51:38 +00:00

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2007-05-08 00:27:59 -07:00
2012-07-27 14:48:13 -04:00
	timekeeping_forward_now(tk);
2007-05-08 00:27:59 -07:00
2012-07-27 14:48:13 -04:00
	xt = tk_xtime(tk);
2014-11-18 19:15:16 +08:00
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
2012-07-13 01:21:53 -04:00
2014-07-16 21:04:01 +00:00
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
2008-08-20 16:37:28 -07:00
2014-11-18 19:15:16 +08:00
	tk_set_xtime(tk, ts);
2012-07-13 01:21:53 -04:00
2013-06-27 11:35:46 +01:00
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
2007-05-08 00:27:59 -07:00
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2007-05-08 00:27:59 -07:00

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
2014-11-18 19:15:16 +08:00
EXPORT_SYMBOL(do_settimeofday64);
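
/*
 * Usage sketch (hypothetical caller, not part of this file): setting the
 * wall clock to a given UTC second, e.g. from an RTC driver.
 */
static int example_set_wall_clock(time64_t sec)
{
	struct timespec64 ts = { .tv_sec = sec, .tv_nsec = 0 };

	return do_settimeofday64(&ts);	/* -EINVAL if ts is not a valid time */
}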
2007-05-08 00:27:59 -07:00
2011-02-01 13:52:17 +00:00
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2011-11-14 14:05:44 -08:00
	unsigned long flags;
2014-07-16 21:04:01 +00:00
	struct timespec64 ts64, tmp;
2012-08-08 15:36:20 -04:00
	int ret = 0;
2011-02-01 13:52:17 +00:00

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;
2014-07-16 21:04:01 +00:00

	ts64 = timespec_to_timespec64(*ts);
2013-02-21 22:51:38 +00:00

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2011-02-01 13:52:17 +00:00
2012-07-27 14:48:13 -04:00
	timekeeping_forward_now(tk);
2011-02-01 13:52:17 +00:00
2012-08-08 15:36:20 -04:00
	/* Make sure the proposed value is valid */
2014-07-16 21:04:01 +00:00
	tmp = timespec64_add(tk_xtime(tk), ts64);
	if (!timespec64_valid_strict(&tmp)) {
2012-08-08 15:36:20 -04:00
		ret = -EINVAL;
		goto error;
	}
2012-07-13 01:21:53 -04:00
2014-07-16 21:04:01 +00:00
	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
2011-02-01 13:52:17 +00:00
2012-08-08 15:36:20 -04:00
error: /* even if we error out, we forwarded the time, so call update */
2013-06-27 11:35:46 +01:00
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
2011-02-01 13:52:17 +00:00
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2011-02-01 13:52:17 +00:00

	/* signal hrtimers about time change */
	clock_was_set();
2012-08-08 15:36:20 -04:00

	return ret;
2011-02-01 13:52:17 +00:00
}
EXPORT_SYMBOL(timekeeping_inject_offset);
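
/*
 * Usage sketch (hypothetical caller, not part of this file): stepping the
 * clock forward by a positive number of milliseconds; note that the offset's
 * tv_nsec must lie in [0, NSEC_PER_SEC) or the call fails with -EINVAL.
 */
static int example_step_clock_forward(long ms)
{
	struct timespec ts = {
		.tv_sec  = ms / MSEC_PER_SEC,
		.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
	};

	return timekeeping_inject_offset(&ts);
}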
2012-05-03 12:30:07 -07:00
/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-05-03 12:30:07 -07:00
	unsigned int seq;
	s32 ret;

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2012-05-03 12:30:07 -07:00
		ret = tk->tai_offset;
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2012-05-03 12:30:07 -07:00

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
2013-03-25 12:24:24 -07:00
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
2012-05-03 12:30:07 -07:00
{
	tk->tai_offset = tai_offset;
2013-12-10 17:13:35 -08:00
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
2012-05-03 12:30:07 -07:00
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-05-03 12:30:07 -07:00
	unsigned long flags;

2013-02-21 22:51:38 +00:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2012-05-03 12:30:07 -07:00
	__timekeeping_set_tai_offset(tk, tai_offset);
2013-12-11 18:50:25 -08:00
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2013-04-10 12:41:49 -07:00
	clock_was_set();
2012-05-03 12:30:07 -07:00
}
2007-05-08 00:27:59 -07:00
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
2009-08-14 15:47:30 +02:00
static int change_clocksource(void *data)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2009-04-21 12:24:02 -07:00
	struct clocksource *new, *old;
2012-03-14 16:38:15 -07:00
	unsigned long flags;
2007-05-08 00:27:59 -07:00
2009-08-14 15:47:30 +02:00
	new = (struct clocksource *) data;
2007-05-08 00:27:59 -07:00
2013-02-21 22:51:38 +00:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2012-03-14 16:38:15 -07:00
2012-07-27 14:48:13 -04:00
	timekeeping_forward_now(tk);
2013-04-25 20:31:44 +00:00
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
2014-07-16 21:05:16 +00:00
			old = tk->tkr.clock;
2013-04-25 20:31:44 +00:00
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
2009-08-14 15:47:30 +02:00
	}
2013-06-27 11:35:46 +01:00
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
2012-03-14 16:38:15 -07:00
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2012-03-14 16:38:15 -07:00
2009-08-14 15:47:30 +02:00
	return 0;
}
2007-05-08 00:27:59 -07:00
2009-08-14 15:47:30 +02:00
/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
2013-04-25 20:31:44 +00:00
int timekeeping_notify(struct clocksource *clock)
2009-08-14 15:47:30 +02:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-07-27 14:48:13 -04:00
2014-07-16 21:05:16 +00:00
	if (tk->tkr.clock == clock)
2013-04-25 20:31:44 +00:00
		return 0;
2009-08-14 15:47:30 +02:00
	stop_machine(change_clocksource, clock, NULL);
2007-05-08 00:27:59 -07:00
	tick_clock_notify();
2014-07-16 21:05:16 +00:00
	return tk->tkr.clock == clock ? 0 : -1;
2007-05-08 00:27:59 -07:00
}
2009-08-14 15:47:30 +02:00
2008-08-20 16:37:30 -07:00
/**
2014-11-07 11:03:20 -08:00
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec64
 * @ts:		pointer to the timespec64 to be set
2008-08-20 16:37:30 -07:00
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
2014-11-07 11:03:20 -08:00
void getrawmonotonic64(struct timespec64 *ts)
2008-08-20 16:37:30 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:04:01 +00:00
	struct timespec64 ts64;
2008-08-20 16:37:30 -07:00
	unsigned long seq;
	s64 nsecs;

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2012-07-27 14:48:13 -04:00
		nsecs = timekeeping_get_ns_raw(tk);
2014-07-16 21:04:01 +00:00
		ts64 = tk->raw_time;
2008-08-20 16:37:30 -07:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2008-08-20 16:37:30 -07:00
2014-07-16 21:04:01 +00:00
	timespec64_add_ns(&ts64, nsecs);
2014-11-07 11:03:20 -08:00
	*ts = ts64;
2008-08-20 16:37:30 -07:00
}
2014-11-07 11:03:20 -08:00
EXPORT_SYMBOL(getrawmonotonic64);
2008-08-20 16:37:30 -07:00
2007-05-08 00:27:59 -07:00
/**
2008-02-08 04:19:24 -08:00
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
2007-05-08 00:27:59 -07:00
 */
2008-02-08 04:19:24 -08:00
int timekeeping_valid_for_hres(void)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2007-05-08 00:27:59 -07:00
	unsigned long seq;
	int ret;

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2007-05-08 00:27:59 -07:00
2014-07-16 21:05:16 +00:00
		ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
2007-05-08 00:27:59 -07:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2007-05-08 00:27:59 -07:00

	return ret;
}
2009-08-18 12:45:10 -05:00
/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2011-11-14 12:48:10 -08:00
	unsigned long seq;
	u64 ret;
2012-07-13 01:21:51 -04:00
2011-11-14 12:48:10 -08:00
	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2011-11-14 12:48:10 -08:00
2014-07-16 21:05:16 +00:00
		ret = tk->tkr.clock->max_idle_ns;
2011-11-14 12:48:10 -08:00
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2011-11-14 12:48:10 -08:00

	return ret;
2009-08-18 12:45:10 -05:00
}
2007-05-08 00:27:59 -07:00
/**
2009-08-14 15:47:31 +02:00
 * read_persistent_clock -  Return time from the persistent clock.
2007-05-08 00:27:59 -07:00
 *
 * Weak dummy function for arches that do not yet support it.
2009-08-14 15:47:31 +02:00
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
2007-05-08 00:27:59 -07:00
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
2014-04-07 15:39:20 -07:00
void __weak read_persistent_clock(struct timespec *ts)
2007-05-08 00:27:59 -07:00
{
2009-08-14 15:47:31 +02:00
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
2007-05-08 00:27:59 -07:00
}

2009-08-14 15:47:32 +02:00
/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
2014-04-07 15:39:20 -07:00
void __weak read_boot_clock(struct timespec *ts)
2009-08-14 15:47:32 +02:00
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
2007-05-08 00:27:59 -07:00
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2009-08-14 15:47:26 +02:00
	struct clocksource *clock;
2007-05-08 00:27:59 -07:00
	unsigned long flags;
2014-07-16 21:04:01 +00:00
	struct timespec64 now, boot, tmp;
	struct timespec ts;
2009-08-14 15:47:31 +02:00
2014-07-16 21:04:01 +00:00
	read_persistent_clock(&ts);
	now = timespec_to_timespec64(ts);
	if (!timespec64_valid_strict(&now)) {
2012-08-08 15:36:20 -04:00
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
2013-01-16 00:09:47 +08:00
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exist = true;
2012-08-08 15:36:20 -04:00
2014-07-16 21:04:01 +00:00
	read_boot_clock(&ts);
	boot = timespec_to_timespec64(ts);
	if (!timespec64_valid_strict(&boot)) {
2012-08-08 15:36:20 -04:00
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}
2007-05-08 00:27:59 -07:00
2013-02-21 22:51:38 +00:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2013-03-22 11:37:28 -07:00
	ntp_init();
2009-08-14 15:47:21 +02:00
	clock = clocksource_default_clock();
2009-08-14 15:47:19 +02:00
	if (clock->enable)
		clock->enable(clock);
2012-07-27 14:48:13 -04:00
	tk_setup_internals(tk, clock);
2007-05-08 00:27:59 -07:00
2012-07-27 14:48:13 -04:00
	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
2014-07-16 21:05:04 +00:00
	tk->base_raw.tv64 = 0;
2012-07-13 01:21:53 -04:00
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
2012-07-27 14:48:13 -04:00
		boot = tk_xtime(tk);
2012-07-13 01:21:53 -04:00
2014-07-16 21:04:01 +00:00
	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
2012-07-27 14:48:13 -04:00
	tk_set_wall_to_mono(tk, tmp);
2012-07-27 14:48:12 -04:00
2014-07-16 21:04:09 +00:00
	timekeeping_update(tk, TK_MIRROR);
2013-02-21 22:51:40 +00:00
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2007-05-08 00:27:59 -07:00
}

/* time in seconds when suspend began */
2014-07-16 21:04:01 +00:00
static struct timespec64 timekeeping_suspend_time;
2007-05-08 00:27:59 -07:00
2011-04-01 14:32:09 -07:00
/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
2012-07-13 01:21:57 -04:00
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
2014-07-16 21:04:01 +00:00
					   struct timespec64 *delta)
2011-04-01 14:32:09 -07:00
{
2014-07-16 21:04:01 +00:00
	if (!timespec64_valid_strict(delta)) {
2014-06-04 16:11:43 -07:00
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
2011-06-01 18:18:09 -07:00
		return;
	}
2012-07-13 01:21:57 -04:00
	tk_xtime_add(tk, delta);
2014-07-16 21:04:01 +00:00
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
2014-07-16 21:05:00 +00:00
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
2013-05-21 22:32:14 -07:00
	tk_debug_account_sleep_time(delta);
2011-04-01 14:32:09 -07:00
}
/**
2014-11-18 19:15:17 +08:00
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
2011-04-01 14:32:09 -07:00
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
2014-11-18 19:15:17 +08:00
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
2011-04-01 14:32:09 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2011-11-14 14:05:44 -08:00
	unsigned long flags;
2011-04-01 14:32:09 -07:00
2013-01-16 00:09:47 +08:00
	/*
	 * Make sure we don't set the clock twice, as timekeeping_resume()
	 * already did it
	 */
	if (has_persistent_clock())
2011-04-01 14:32:09 -07:00
		return;
2013-02-21 22:51:38 +00:00

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2011-11-14 12:48:10 -08:00
2012-07-27 14:48:13 -04:00
	timekeeping_forward_now(tk);
2011-04-01 14:32:09 -07:00
2014-11-18 19:15:17 +08:00
	__timekeeping_inject_sleeptime(tk, delta);
2011-04-01 14:32:09 -07:00
2013-06-27 11:35:46 +01:00
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
2011-04-01 14:32:09 -07:00
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2011-04-01 14:32:09 -07:00

	/* signal hrtimers about time change */
	clock_was_set();
}
2007-05-08 00:27:59 -07:00
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
2015-02-13 23:50:43 +01:00
void timekeeping_resume(void)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:05:16 +00:00
	struct clocksource *clock = tk->tkr.clock;
2011-11-14 14:05:44 -08:00
	unsigned long flags;
2014-07-16 21:04:01 +00:00
	struct timespec64 ts_new, ts_delta;
	struct timespec tmp;
2013-03-12 11:56:48 +08:00
	cycle_t cycle_now, cycle_delta;
	bool suspendtime_found = false;
2009-08-14 15:47:31 +02:00
2014-07-16 21:04:01 +00:00
	read_persistent_clock(&tmp);
	ts_new = timespec_to_timespec64(tmp);
2007-05-08 00:27:59 -07:00
2012-08-06 01:40:41 +02:00
	clockevents_resume();
2007-05-14 11:10:02 +02:00
	clocksource_resume();
2013-02-21 22:51:38 +00:00

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2007-05-08 00:27:59 -07:00
2013-03-12 11:56:48 +08:00
	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
2014-07-16 21:05:16 +00:00
	cycle_now = tk->tkr.read(clock);
2013-03-12 11:56:48 +08:00
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
2014-07-16 21:05:16 +00:00
		cycle_now > tk->tkr.cycle_last) {
2013-03-12 11:56:48 +08:00
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

2014-07-16 21:05:16 +00:00
		cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
						tk->tkr.mask);
2013-03-12 11:56:48 +08:00

		/*
		 * "cycle_delta * mult" may cause 64 bits overflow, if the
		 * suspended time is too long. In that case we need to do the
		 * 64 bit math carefully
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

2014-07-16 21:04:01 +00:00
		ts_delta = ns_to_timespec64(nsec);
2013-03-12 11:56:48 +08:00
		suspendtime_found = true;
2014-07-16 21:04:01 +00:00
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
2013-03-12 11:56:48 +08:00
		suspendtime_found = true;
2007-05-08 00:27:59 -07:00
	}
2013-03-12 11:56:48 +08:00

	if (suspendtime_found)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
2014-07-16 21:05:16 +00:00
	tk->tkr.cycle_last = cycle_now;
2012-07-27 14:48:13 -04:00
	tk->ntp_error = 0;
2007-05-08 00:27:59 -07:00
	timekeeping_suspended = 0;
2013-06-27 11:35:46 +01:00
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2007-05-08 00:27:59 -07:00

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
2011-05-02 16:48:57 +02:00
	hrtimers_resume();
2007-05-08 00:27:59 -07:00
}
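
/*
 * Illustrative sketch (not part of this file) of the overflow-safe
 * conversion used above: when the suspend interval is so long that
 * cycles * mult would overflow 64 bits, the delta is split into chunks of
 * at most ULLONG_MAX / mult cycles and each chunk is converted separately.
 */
static u64 example_safe_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
	u64 max = ULLONG_MAX, num, nsec = 0;

	do_div(max, mult);		/* largest chunk whose product fits in 64 bits */
	if (cycles > max) {
		num = div64_u64(cycles, max);
		nsec = (((u64) max * mult) >> shift) * num;
		cycles -= num * max;
	}
	nsec += ((u64) cycles * mult) >> shift;
	return nsec;
}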
2015-02-13 23:50:43 +01:00
int timekeeping_suspend(void)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2011-11-14 14:05:44 -08:00
	unsigned long flags;
2014-07-16 21:04:01 +00:00
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;
	struct timespec tmp;
2007-05-08 00:27:59 -07:00
2014-07-16 21:04:01 +00:00
	read_persistent_clock(&tmp);
	timekeeping_suspend_time = timespec_to_timespec64(tmp);
2007-09-16 15:36:43 +02:00
2013-05-17 11:24:05 -07:00
	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exist = true;
2013-02-21 22:51:38 +00:00

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2012-07-27 14:48:13 -04:00
	timekeeping_forward_now(tk);
2007-05-08 00:27:59 -07:00
	timekeeping_suspended = 1;
2011-05-31 22:53:23 -07:00

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
2014-07-16 21:04:01 +00:00
	delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec64_sub(delta, old_delta);
2011-05-31 22:53:23 -07:00
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
2014-07-16 21:04:01 +00:00
			timespec64_add(timekeeping_suspend_time, delta_delta);
2011-05-31 22:53:23 -07:00
	}
2013-12-11 19:10:36 -08:00

	timekeeping_update(tk, TK_MIRROR);
2015-02-13 14:49:02 +01:00
	halt_fast_timekeeper(tk);
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2007-05-08 00:27:59 -07:00

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
2010-02-02 14:41:41 -08:00
	clocksource_suspend();
2012-08-06 01:40:41 +02:00
	clockevents_suspend();
2007-05-08 00:27:59 -07:00

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
2011-03-23 22:16:04 +01:00
static struct syscore_ops timekeeping_syscore_ops = {
2007-05-08 00:27:59 -07:00
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

2011-03-23 22:16:04 +01:00
static int __init timekeeping_init_ops(void)
2007-05-08 00:27:59 -07:00
{
2011-03-23 22:16:04 +01:00
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
2007-05-08 00:27:59 -07:00
}
2011-03-23 22:16:04 +01:00
device_initcall(timekeeping_init_ops);
2007-05-08 00:27:59 -07:00
/*
2013-12-06 17:25:21 -08:00
* Apply a multiplier adjustment to the timekeeper
2007-05-08 00:27:59 -07:00
*/
2013-12-06 17:25:21 -08:00
static __always_inline void timekeeping_apply_adjustment ( struct timekeeper * tk ,
s64 offset ,
bool negative ,
int adj_scale )
2007-05-08 00:27:59 -07:00
{
2013-12-06 17:25:21 -08:00
s64 interval = tk - > cycle_interval ;
s32 mult_adj = 1 ;
2007-05-08 00:27:59 -07:00
2013-12-06 17:25:21 -08:00
if ( negative ) {
mult_adj = - mult_adj ;
interval = - interval ;
offset = - offset ;
2007-05-08 00:27:59 -07:00
}
2013-12-06 17:25:21 -08:00
mult_adj < < = adj_scale ;
interval < < = adj_scale ;
offset < < = adj_scale ;
2007-05-08 00:27:59 -07:00
2011-10-27 18:12:42 -07:00
	/*
	 * So the following can be confusing.
	 *
2013-12-06 17:25:21 -08:00
	 * To keep things simple, let's assume mult_adj == 1 for now.
2011-10-27 18:12:42 -07:00
	 *
2013-12-06 17:25:21 -08:00
	 * When mult_adj != 1, remember that the interval and offset values
2011-10-27 18:12:42 -07:00
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, which causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1 + 1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
2014-11-24 20:35:45 -08:00
	if ((mult_adj > 0) && (tk->tkr.mult + mult_adj < mult_adj)) {
2014-10-08 15:03:34 +08:00
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}
2013-12-06 17:25:21 -08:00
	tk->tkr.mult += mult_adj;
2012-07-13 01:21:57 -04:00
	tk->xtime_interval += interval;
2014-07-16 21:05:16 +00:00
	tk->tkr.xtime_nsec -= offset;
2012-07-13 01:21:57 -04:00
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
2013-12-06 17:25:21 -08:00
}
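
/*
 * Worked instance (standalone sketch, illustrative values only) of the
 * invariant derived in the comment above: bumping the multiplier by
 * mult_adj == 1 while subtracting offset from xtime_nsec leaves the
 * readout now = (offset * mult) + xtime_nsec unchanged, so no time
 * jump is observable across the frequency adjustment.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset = 1000;			/* unaccumulated cycles */
	uint64_t mult = 5;			/* current multiplier */
	uint64_t xtime_nsec = 40000;		/* shifted nanoseconds */
	uint64_t before = offset * mult + xtime_nsec;

	mult += 1;				/* tk->tkr.mult += mult_adj */
	xtime_nsec -= offset;			/* tk->tkr.xtime_nsec -= offset */

	assert(before == offset * mult + xtime_nsec);	/* still 45000 */
	return 0;
}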
/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
						   s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	s64 tick_error;
	bool negative;
	u32 adj;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;
2014-04-23 20:53:29 -07:00
	tk->ntp_tick = ntp_tick_length();
2013-12-06 17:25:21 -08:00
	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if it's small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* preserve the direction of correction */
	negative = (tick_error < 0);

	/* Sort out the magnitude of the correction */
	tick_error = abs(tick_error);
	for (adj = 0; tick_error > interval; adj++)
		tick_error >>= 1;

	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj);
}
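
/*
 * The magnitude loop above computes roughly ilog2(tick_error / interval):
 * the number of doublings by which the correction must be scaled so a
 * single adjustment step lands within one interval of the target. A
 * standalone sketch (names local to the sketch):
 */
#include <stdio.h>

static unsigned int adj_scale(long long tick_error, long long interval)
{
	unsigned int adj = 0;

	while (tick_error > interval) {		/* cf. the for loop above */
		tick_error >>= 1;
		adj++;
	}
	return adj;
}

int main(void)
{
	/* error 9x the interval: one 2^4-scaled step brings it in range */
	printf("%u\n", adj_scale(9000, 1000));	/* prints 4 */
	return 0;
}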
/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}

	if (unlikely(tk->tkr.clock->maxadj &&
2014-10-09 15:04:31 +08:00
		(abs(tk->tkr.mult - tk->tkr.clock->mult)
			> tk->tkr.clock->maxadj))) {
2013-12-06 17:25:21 -08:00
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr.clock->name, (long)tk->tkr.mult,
			(long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
	}
2012-07-13 01:21:56 -04:00
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small. Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
2014-07-16 21:05:16 +00:00
	if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr.xtime_nsec;
		tk->tkr.xtime_nsec = 0;
2012-07-13 01:21:57 -04:00
		tk->ntp_error += neg << tk->ntp_error_shift;
2012-07-13 01:21:56 -04:00
	}
2007-05-08 00:27:59 -07:00
}
2012-07-13 01:21:54 -04:00
/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
2013-06-27 11:35:46 +01:00
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
2012-07-13 01:21:54 -04:00
{
2014-07-16 21:05:16 +00:00
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
2013-12-11 20:07:49 -08:00
	unsigned int clock_set = 0;
2012-07-13 01:21:54 -04:00
2014-07-16 21:05:16 +00:00
	while (tk->tkr.xtime_nsec >= nsecps) {
2012-07-13 01:21:54 -04:00
		int leap;
2014-07-16 21:05:16 +00:00
		tk->tkr.xtime_nsec -= nsecps;
2012-07-13 01:21:54 -04:00
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
2012-07-27 14:48:12 -04:00
		if (unlikely(leap)) {
2014-07-16 21:04:01 +00:00
			struct timespec64 ts;
2012-07-13 01:21:54 -04:00
2012-07-27 14:48:12 -04:00
			tk->xtime_sec += leap;
			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
2014-07-16 21:04:01 +00:00
				timespec64_sub(tk->wall_to_monotonic, ts));
2012-07-27 14:48:12 -04:00
2012-05-03 12:30:07 -07:00
			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
2013-12-11 20:07:49 -08:00
			clock_set = TK_CLOCK_WAS_SET;
2012-07-27 14:48:12 -04:00
		}
2012-07-13 01:21:54 -04:00
	}
2013-12-11 20:07:49 -08:00
	return clock_set;
2012-07-13 01:21:54 -04:00
}
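
/*
 * Standalone sketch of the accumulation above: xtime_nsec stores
 * nanoseconds left-shifted by the clocksource shift for precision, so
 * one "second" in that representation is NSEC_PER_SEC << shift. Whole
 * seconds are peeled off into xtime_sec (leap second handling elided;
 * all values are illustrative).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000ULL;
	unsigned int shift = 8;				/* example shift */
	uint64_t nsecps = nsec_per_sec << shift;
	uint64_t xtime_nsec = (2 * nsec_per_sec + 5) << shift;
	uint64_t xtime_sec = 0;

	while (xtime_nsec >= nsecps) {
		xtime_nsec -= nsecps;
		xtime_sec++;
	}
	printf("%llu sec + %llu ns\n",
	       (unsigned long long)xtime_sec,
	       (unsigned long long)(xtime_nsec >> shift));	/* 2 sec + 5 ns */
	return 0;
}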
2009-10-02 16:17:53 -07:00
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
2012-07-13 01:21:57 -04:00
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
2013-12-11 20:07:49 -08:00
					u32 shift,
					unsigned int *clock_set)
2009-10-02 16:17:53 -07:00
{
2013-02-21 22:51:36 +00:00
	cycle_t interval = tk->cycle_interval << shift;
2010-08-09 14:20:09 -07:00
	u64 raw_nsecs;
2009-10-02 16:17:53 -07:00
2012-07-13 01:21:57 -04:00
	/* If the offset is smaller than a shifted interval, do nothing */
2013-02-21 22:51:36 +00:00
	if (offset < interval)
2009-10-02 16:17:53 -07:00
		return offset;

	/* Accumulate one shifted interval */
2013-02-21 22:51:36 +00:00
	offset -= interval;
2014-07-16 21:05:16 +00:00
	tk->tkr.cycle_last += interval;
2009-10-02 16:17:53 -07:00
2014-07-16 21:05:16 +00:00
	tk->tkr.xtime_nsec += tk->xtime_interval << shift;
2013-12-11 20:07:49 -08:00
	*clock_set |= accumulate_nsecs_to_secs(tk);
2009-10-02 16:17:53 -07:00
2010-08-09 14:20:09 -07:00
	/* Accumulate raw time */
2012-10-09 10:18:23 +03:00
	raw_nsecs = (u64)tk->raw_interval << shift;
2012-07-13 01:21:57 -04:00
	raw_nsecs += tk->raw_time.tv_nsec;
2010-08-13 11:30:58 -07:00
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
2012-07-13 01:21:57 -04:00
		tk->raw_time.tv_sec += raw_secs;
2009-10-02 16:17:53 -07:00
	}
2012-07-13 01:21:57 -04:00
	tk->raw_time.tv_nsec = raw_nsecs;
2009-10-02 16:17:53 -07:00

	/* Accumulate error between NTP and clock interval */
2014-04-23 20:53:29 -07:00
	tk->ntp_error += tk->ntp_tick << shift;
2012-07-13 01:21:57 -04:00
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);
2009-10-02 16:17:53 -07:00
	return offset;
}
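
/*
 * Standalone sketch of the O(log) accumulation strategy: instead of
 * consuming the pending offset one cycle_interval at a time (which
 * could be thousands of iterations after a long NO_HZ idle), chunks of
 * cycle_interval << shift are consumed, halving the chunk size as the
 * remainder shrinks. Mirrors the update_wall_time() loop below; all
 * values are illustrative.
 */
#include <stdio.h>

typedef unsigned long long cycle_t;

static cycle_t log_accum(cycle_t offset, cycle_t interval, int shift)
{
	cycle_t chunk = interval << shift;

	if (offset < chunk)		/* nothing to do at this scale */
		return offset;
	return offset - chunk;		/* consume one 2^shift-tick chunk */
}

int main(void)
{
	cycle_t interval = 1000;	/* cycles per tick */
	cycle_t offset = 1000000;	/* ~1000 ticks pending */
	int shift = 0, steps = 0;

	while (interval << (shift + 1) <= offset)
		shift++;		/* cf. ilog2(offset) - ilog2(interval) */

	while (offset >= interval) {
		offset = log_accum(offset, interval, shift);
		if (offset < interval << shift && shift > 0)
			shift--;
		steps++;
	}
	printf("%d steps, %llu left\n", steps, offset);	/* 8 steps, 0 left */
	return 0;
}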
2007-05-08 00:27:59 -07:00
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
2013-12-12 13:10:55 -08:00
void update_wall_time(void)
2007-05-08 00:27:59 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *real_tk = &tk_core.timekeeper;
2013-02-21 22:51:40 +00:00
	struct timekeeper *tk = &shadow_timekeeper;
2007-05-08 00:27:59 -07:00
	cycle_t offset;
2009-10-02 16:17:53 -07:00
	int shift = 0, maxshift;
2013-12-11 20:07:49 -08:00
	unsigned int clock_set = 0;
2011-11-14 12:48:10 -08:00
	unsigned long flags;
2013-02-21 22:51:38 +00:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2007-05-08 00:27:59 -07:00
	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
2011-11-14 12:48:10 -08:00
		goto out;
2007-05-08 00:27:59 -07:00
2010-07-13 17:56:20 -07:00
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
2013-02-21 22:51:40 +00:00
	offset = real_tk->cycle_interval;
2010-07-13 17:56:20 -07:00
#else
2014-07-16 21:05:16 +00:00
	offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
				   tk->tkr.cycle_last, tk->tkr.mask);
2007-05-08 00:27:59 -07:00
#endif
2012-08-21 20:30:49 -04:00
	/* Check if there's really nothing to do */
2013-02-21 22:51:40 +00:00
	if (offset < real_tk->cycle_interval)
2012-08-21 20:30:49 -04:00
		goto out;
2015-03-11 21:16:32 -07:00
	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);
2009-10-02 16:17:53 -07:00
	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
2012-03-14 21:28:56 -06:00
	 * that is smaller than the offset. We then accumulate that
2009-10-02 16:17:53 -07:00
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
2007-05-08 00:27:59 -07:00
	 */
2012-07-27 14:48:13 -04:00
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
2009-10-02 16:17:53 -07:00
	shift = max(0, shift);
2012-03-14 21:28:56 -06:00
	/* Bound shift to one less than what overflows tick_length */
2011-11-14 13:18:07 -08:00
	maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
2009-10-02 16:17:53 -07:00
	shift = min(shift, maxshift);
2012-07-27 14:48:13 -04:00
	while (offset >= tk->cycle_interval) {
2013-12-11 20:07:49 -08:00
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
2012-07-27 14:48:13 -04:00
		if (offset < tk->cycle_interval << shift)
2010-03-18 14:47:30 -07:00
			shift--;
2007-05-08 00:27:59 -07:00
	}

	/* correct the clock when NTP error is too big */
2012-07-27 14:48:13 -04:00
	timekeeping_adjust(tk, offset);
2007-05-08 00:27:59 -07:00
2010-04-06 14:30:51 -07:00
	/*
2012-09-04 15:38:12 -04:00
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);
2007-05-08 00:27:59 -07:00
2010-04-06 14:30:51 -07:00
	/*
	 * Finally, make sure that after the rounding
2012-07-13 01:21:53 -04:00
	 * xtime_nsec isn't larger than NSEC_PER_SEC
2010-04-06 14:30:51 -07:00
	 */
2013-12-11 20:07:49 -08:00
	clock_set |= accumulate_nsecs_to_secs(tk);
2009-12-22 14:10:37 -08:00
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2013-02-21 22:51:40 +00:00
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
2014-07-16 21:04:07 +00:00
	 * memcpy under the tk_core.seq against one before we start
2013-02-21 22:51:40 +00:00
	 * updating.
	 */
	memcpy(real_tk, tk, sizeof(*tk));
2013-12-11 20:07:49 -08:00
	timekeeping_update(real_tk, clock_set);
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-02-21 22:51:40 +00:00
out:
2013-02-21 22:51:38 +00:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2013-12-12 13:10:55 -08:00
	if (clock_set)
2014-03-27 16:30:49 -07:00
		/* Have to call _delayed version, since in irq context */
		clock_was_set_delayed();
2007-05-08 00:27:59 -07:00
}
2007-07-15 23:39:41 -07:00
/**
2014-12-08 12:00:09 -08:00
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
2007-07-15 23:39:41 -07:00
 *
2014-12-08 12:00:09 -08:00
 * Returns the wall-time of boot in a timespec64.
2007-07-15 23:39:41 -07:00
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
2014-12-08 12:00:09 -08:00
void getboottime64(struct timespec64 *ts)
2007-07-15 23:39:41 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:04:58 +00:00
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
2009-08-14 15:47:31 +02:00
2014-12-08 12:00:09 -08:00
	*ts = ktime_to_timespec64(t);
2007-07-15 23:39:41 -07:00
}
2014-12-08 12:00:09 -08:00
EXPORT_SYMBOL_GPL(getboottime64);
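
/*
 * Userspace analog (illustrative sketch, not part of this file): the
 * boot wall time reported by getboottime64() can be approximated from
 * userspace by subtracting CLOCK_BOOTTIME (time since boot, including
 * suspend) from CLOCK_REALTIME. The small race between the two reads
 * is ignored here.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec real, boot;

	clock_gettime(CLOCK_REALTIME, &real);
	clock_gettime(CLOCK_BOOTTIME, &boot);

	printf("system booted around %lld (unix time)\n",
	       (long long)(real.tv_sec - boot.tv_sec));
	return 0;
}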
2007-07-15 23:39:41 -07:00
2007-07-24 18:38:34 -07:00
unsigned long get_seconds(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-07-27 14:48:13 -04:00
	return tk->xtime_sec;
2007-07-24 18:38:34 -07:00
}
EXPORT_SYMBOL(get_seconds);
2009-08-19 19:13:34 -07:00
struct timespec __current_kernel_time(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-07-27 14:48:13 -04:00
2014-07-16 21:04:01 +00:00
	return timespec64_to_timespec(tk_xtime(tk));
2009-08-19 19:13:34 -07:00
}
2007-07-24 18:38:34 -07:00
2007-07-24 17:47:43 -07:00
struct timespec current_kernel_time(void)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:04:01 +00:00
	struct timespec64 now;
2007-07-24 17:47:43 -07:00
	unsigned long seq;

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2009-12-22 14:10:37 -08:00
2012-07-27 14:48:13 -04:00
		now = tk_xtime(tk);
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2007-07-24 17:47:43 -07:00
2014-07-16 21:04:01 +00:00
	return timespec64_to_timespec(now);
2007-07-24 17:47:43 -07:00
}
EXPORT_SYMBOL(current_kernel_time);
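
/*
 * The do { } while (read_seqcount_retry()) pattern above is the
 * lockless reader side of the seqcount protecting tk_core: snapshot
 * the data, then retry if a writer ran concurrently. A simplified
 * standalone sketch of the idea using C11 atomics (the real kernel
 * primitives additionally insert the required memory barriers, which
 * are elided here; all names are local to the sketch):
 */
#include <stdatomic.h>

struct snap { long sec; long nsec; };

static atomic_uint snap_seq;	/* even: stable, odd: write in progress */
static struct snap snap_data;

static struct snap snap_read(void)
{
	struct snap s;
	unsigned int start;

	do {
		while ((start = atomic_load(&snap_seq)) & 1)
			;				/* writer active */
		s = snap_data;
	} while (atomic_load(&snap_seq) != start);	/* writer intervened */
	return s;
}

static void snap_write(struct snap s)
{
	atomic_fetch_add(&snap_seq, 1);			/* mark odd */
	snap_data = s;
	atomic_fetch_add(&snap_seq, 1);			/* mark even */
}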
2009-08-19 19:13:34 -07:00
2014-11-07 11:20:40 -08:00
struct timespec64 get_monotonic_coarse64(void)
2009-08-19 19:13:34 -07:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:04:01 +00:00
	struct timespec64 now, mono;
2009-08-19 19:13:34 -07:00
	unsigned long seq;

	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2009-12-22 14:10:37 -08:00
2012-07-27 14:48:13 -04:00
		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2009-08-19 19:13:34 -07:00
2014-07-16 21:04:01 +00:00
	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
2009-08-19 19:13:34 -07:00
				now.tv_nsec + mono.tv_nsec);
2014-07-16 21:04:01 +00:00
2014-11-07 11:20:40 -08:00
	return now;
2009-08-19 19:13:34 -07:00
}
2011-01-27 15:58:55 +01:00
/*
2012-02-28 16:50:11 -08:00
 * Must hold jiffies_lock
2011-01-27 15:58:55 +01:00
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}
2011-01-27 15:59:05 +01:00
/**
2014-07-16 21:03:52 +00:00
 * ktime_get_update_offsets_tick - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns monotonic time at last tick and various offsets
2011-01-27 15:59:05 +01:00
 */
2014-07-16 21:03:52 +00:00
ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
				      ktime_t *offs_tai)
2011-01-27 15:59:05 +01:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2014-07-16 21:03:52 +00:00
	unsigned int seq;
2014-07-16 21:04:20 +00:00
	ktime_t base;
	u64 nsecs;
2011-01-27 15:59:05 +01:00
	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2014-07-16 21:03:52 +00:00
2014-07-16 21:05:16 +00:00
		base = tk->tkr.base_mono;
		nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;
2014-07-16 21:04:20 +00:00
2014-07-16 21:03:52 +00:00
		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
		*offs_tai = tk->offs_tai;
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2014-07-16 21:03:52 +00:00
2014-07-16 21:04:20 +00:00
	return ktime_add_ns(base, nsecs);
2011-01-27 15:59:05 +01:00
}
2011-01-27 15:59:10 +01:00
2012-07-10 18:43:24 -04:00
#ifdef CONFIG_HIGH_RES_TIMERS
/**
2014-07-16 21:03:52 +00:00
 * ktime_get_update_offsets_now - hrtimer helper
2012-07-10 18:43:24 -04:00
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
2013-10-18 09:13:30 +08:00
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
2012-07-10 18:43:24 -04:00
 *
 * Returns current monotonic time and updates the offsets
2013-10-18 09:13:30 +08:00
 * Called from hrtimer_interrupt() or retrigger_next_event()
2012-07-10 18:43:24 -04:00
 */
2014-07-16 21:03:52 +00:00
ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
2013-01-21 17:00:11 -08:00
				     ktime_t *offs_tai)
2012-07-10 18:43:24 -04:00
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2012-07-10 18:43:24 -04:00
	unsigned int seq;
2014-07-16 21:04:19 +00:00
	ktime_t base;
	u64 nsecs;
2012-07-10 18:43:24 -04:00
	do {
2014-07-16 21:04:07 +00:00
		seq = read_seqcount_begin(&tk_core.seq);
2012-07-10 18:43:24 -04:00
2014-07-16 21:05:16 +00:00
		base = tk->tkr.base_mono;
2014-07-16 21:05:18 +00:00
		nsecs = timekeeping_get_ns(&tk->tkr);
2012-07-10 18:43:24 -04:00
2012-07-27 14:48:13 -04:00
		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
2013-01-21 17:00:11 -08:00
		*offs_tai = tk->offs_tai;
2014-07-16 21:04:07 +00:00
	} while (read_seqcount_retry(&tk_core.seq, seq));
2012-07-10 18:43:24 -04:00
2014-07-16 21:04:19 +00:00
	return ktime_add_ns(base, nsecs);
2012-07-10 18:43:24 -04:00
}
#endif
2013-03-22 11:31:29 -07:00
/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
2014-07-16 21:04:07 +00:00
	struct timekeeper *tk = &tk_core.timekeeper;
2013-03-22 11:37:28 -07:00
	unsigned long flags;
2014-07-16 21:04:01 +00:00
	struct timespec64 ts;
2013-04-10 12:41:49 -07:00
	s32 orig_tai, tai;
2013-03-22 12:08:52 -07:00
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;
2013-03-22 15:04:13 -07:00
	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}
2014-07-16 21:04:04 +00:00
	getnstimeofday64(&ts);
2013-03-22 11:31:29 -07:00
2013-03-22 11:37:28 -07:00
	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2013-03-22 11:37:28 -07:00
2013-04-10 12:41:49 -07:00
	orig_tai = tai = tk->tai_offset;
2013-03-22 12:28:15 -07:00
	ret = __do_adjtimex(txc, &ts, &tai);
2013-04-10 12:41:49 -07:00
	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
2013-12-11 18:50:25 -08:00
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2013-04-10 12:41:49 -07:00
	}
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-03-22 11:37:28 -07:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2013-12-10 17:18:18 -08:00
	if (tai != orig_tai)
		clock_was_set();
2013-09-11 16:50:56 -07:00
	ntp_notify_cmos_timer();
2013-03-22 12:28:15 -07:00
	return ret;
}
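
/*
 * do_adjtimex() backs the adjtimex(2) system call. An illustrative
 * userspace caller (standalone sketch, not part of this file) applying
 * a one-shot 500us step via ADJ_SETOFFSET (requires CAP_SYS_TIME);
 * with ADJ_NANO set, time.tv_usec carries nanoseconds, matching the
 * conversion branch above:
 */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { 0 };

	tx.modes = ADJ_SETOFFSET | ADJ_NANO;	/* tv_usec holds nanoseconds */
	tx.time.tv_sec = 0;
	tx.time.tv_usec = 500000;		/* 500000 ns = 500 us */

	if (adjtimex(&tx) < 0)
		perror("adjtimex");
	return 0;
}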
2013-03-22 11:31:29 -07:00
#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
2013-03-22 11:37:28 -07:00
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
2014-07-16 21:04:07 +00:00
	write_seqcount_begin(&tk_core.seq);
2013-03-22 11:37:28 -07:00
2013-03-22 11:31:29 -07:00
	__hardpps(phase_ts, raw_ts);
2013-03-22 11:37:28 -07:00
2014-07-16 21:04:07 +00:00
	write_seqcount_end(&tk_core.seq);
2013-03-22 11:37:28 -07:00
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2013-03-22 11:31:29 -07:00
}
EXPORT_SYMBOL(hardpps);
#endif
2011-01-27 15:59:10 +01:00
/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
2012-02-28 16:50:11 -08:00
	write_seqlock(&jiffies_lock);
2011-01-27 15:59:10 +01:00
	do_timer(ticks);
2012-02-28 16:50:11 -08:00
	write_sequnlock(&jiffies_lock);
2013-12-12 13:10:55 -08:00
	update_wall_time();
2011-01-27 15:59:10 +01:00
}