/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)
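
/*
 * The TK_* action flags above are ORed together and passed to
 * timekeeping_update() below: TK_CLEAR_NTP resets the NTP state,
 * TK_MIRROR copies the updated timekeeper into shadow_timekeeper, and
 * TK_CLOCK_WAS_SET bumps clock_was_set_seq so readers can notice that
 * the clock was stepped.
 */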

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}
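
/*
 * Illustrative note (not from the original source): xtime_nsec is kept
 * in "shifted nanoseconds", i.e. nanoseconds left-shifted by the tkr's
 * shift value, which is why the loops above compare against
 * NSEC_PER_SEC << shift. For a hypothetical shift of 8, one nanosecond
 * is stored as 256, and reading the value back looks like:
 *
 *	u64 nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
 */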

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper is necessary to use in the read paths because, while the
 * seqlock ensures we don't return a bad value while structures are updated,
 * it doesn't protect from potential crashes. There is the possibility that
 * the tkr's clocksource may change between the read reference, and the
 * clock reference passed to the read function.  This can cause crashes if
 * the wrong clocksource is passed to the wrong read function.
 * This isn't necessary to use when holding the timekeeper_lock or doing
 * a read of the fast-timekeeper tkrs (which is protected by its own locking
 * and update logic).
 */
static inline u64 tk_clock_read(struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{

	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles value to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tk_clock_read(tkr);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	 /* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
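
/*
 * Worked example (illustrative assumptions, not from the original
 * source): for a hypothetical 1 GHz clocksource with shift = 24 and
 * mult = 1 << 24 (exactly 1 ns per cycle), and an NTP interval length
 * of 1,000,000 ns (HZ = 1000), the conversion above computes:
 *
 *	tmp  = 1000000ULL << 24;	(ns -> shifted ns)
 *	tmp += (1 << 24) / 2;		(round to nearest cycle)
 *	tmp /= 1 << 24;			(-> 1,000,000 cycles)
 *
 * so cycle_interval ends up as 1,000,000 cycles and xtime_interval as
 * cycle_interval * mult, i.e. one NTP interval in shifted nanoseconds.
 */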

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in arch_gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
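
/*
 * Illustrative note (not from the original source): the scaling above is
 * the usual clocksource conversion ns = (delta * mult) >> shift, with the
 * pre-shifted xtime_nsec remainder folded in before the shift. With a
 * hypothetical mult of 1 << 22 and shift of 22 (1 ns per cycle), a delta
 * of 1000 cycles yields roughly 1000 ns.
 */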

static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if a NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqlocks. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping making the clock appear to update slightly
 * earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated.  Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/* Suspend-time cycles value for halted fast timekeeper. */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibility will disappear soon.

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.shift, tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	* Store only full nanoseconds into xtime_nsec after rounding
	* it up and add the remainder to the error difference.
	* XXX - This is necessary to avoid small 1ns inconsistencies caused
	* by truncating the remainder in vsyscalls. However, it causes
	* additional work to be done in timekeeping_adjust(). Once
	* the vsyscall implementations are converted to use xtime_nsec
	* (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	* users are removed, this can be killed.
	*/
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	if (remainder != 0) {
		tk->tkr_mono.xtime_nsec -= remainder;
		tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
		tk->ntp_error += remainder << tk->ntp_error_shift;
		tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
	}
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
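
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 * a paravirt clock driver registers an ordinary notifier_block and
 * recomputes its guest-visible clock parameters whenever the chain fires.
 * The chain is invoked from timekeeping_update() via update_pvclock_gtod()
 * with 'was_set' as the action and the timekeeper as the data pointer.
 *
 *	static int example_gtod_notify(struct notifier_block *nb,
 *				       unsigned long was_set, void *priv)
 *	{
 *		// priv points at the updated struct timekeeper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_gtod_nb = {
 *		.notifier_call = example_gtod_notify,
 *	};
 *
 *	pvclock_gtod_register_notifier(&example_gtod_nb);
 */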

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	u64 cycle_now, delta;

	cycle_now = tk_clock_read(&tk->tkr_mono);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in arch_gettimeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;


	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

	/* If arch requires, add in arch_gettimeoffset() */
	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

	tk_normalize_xtime(tk);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);
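
/*
 * Illustrative note (assumption, not from the original source): the common
 * in-kernel accessors are expected to be thin inline wrappers around
 * ktime_get_with_offset(), along the lines of:
 *
 *	static inline ktime_t ktime_get_real(void)
 *	{
 *		return ktime_get_with_offset(TK_OFFS_REAL);
 *	}
 */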

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	unsigned int seq;
	u64 nsec;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}

944 945 946 947 948 949 950 951 952 953
/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	u64 nsec_raw;
	u64 nsec_real;
	u64 now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(&tk->tkr_mono);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
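
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * that needs correlated realtime/raw timestamps plus the counter value
 * can take a single snapshot:
 *
 *	struct system_time_snapshot snap;
 *
 *	ktime_get_snapshot(&snap);
 *
 * after which snap.real, snap.raw and snap.cycles all stem from the same
 * clocksource readout.
 */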