Diffstat (limited to 'kernel/time/timekeeping.c')

-rw-r--r--	kernel/time/timekeeping.c | 47 ++++++++++++++++++++++++++++++++++-------------
1 file changed, 34 insertions(+), 13 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5fa544f3f560..738f3467d169 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+	struct clocksource *clock = READ_ONCE(tkr->clock);
+
+	return clock->read(clock);
+}
+
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300)	/* 5 minute rate-limiting */
 
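As background (not part of the commit), here is a compact userspace sketch of the race the new helper closes. The struct names mirror the kernel ones, but read_a/read_b, clock_a/clock_b, and the READ_ONCE macro are simplified stand-ins; the two-load pattern shown as "racy" is the one this patch removes throughout the file.

	#include <stdio.h>

	struct clocksource {
		unsigned long long (*read)(struct clocksource *cs);
	};

	struct tk_read_base {
		struct clocksource *clock;
	};

	static unsigned long long read_a(struct clocksource *cs) { (void)cs; return 1000; }
	static unsigned long long read_b(struct clocksource *cs) { (void)cs; return 2000; }

	static struct clocksource clock_a = { .read = read_a };
	static struct clocksource clock_b = { .read = read_b };

	/* Userspace approximation of the kernel's READ_ONCE(). */
	#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

	static inline unsigned long long tk_clock_read(struct tk_read_base *tkr)
	{
		/*
		 * One snapshot of the pointer: the read() function and the
		 * clocksource passed to it always come from the same
		 * structure, even if tkr->clock changes concurrently.
		 */
		struct clocksource *clock = READ_ONCE(tkr->clock);

		return clock->read(clock);
	}

	int main(void)
	{
		struct tk_read_base tkr = { .clock = &clock_a };

		/*
		 * The removed pattern loads tkr->clock twice. If a
		 * concurrent clocksource switch lands between the two
		 * loads, clock_a's read() can be handed clock_b (or vice
		 * versa), which is the crash the comment above describes.
		 */
		printf("racy: %llu\n", tkr.clock->read(tkr.clock));

		tkr.clock = &clock_b;	/* what a concurrent switch would do */

		printf("safe: %llu\n", tk_clock_read(&tkr));
		return 0;
	}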
@@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 	 */
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tkr->read(tkr->clock);
+		now = tk_clock_read(tkr);
 		last = tkr->cycle_last;
 		mask = tkr->mask;
 		max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 	cycle_t cycle_now, delta;
 
 	/* read clocksource */
-	cycle_now = tkr->read(tkr->clock);
+	cycle_now = tk_clock_read(tkr);
 
 	/* calculate the delta since the last update_wall_time */
 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -235,12 +255,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
-	tk->tkr_mono.read = clock->read;
 	tk->tkr_mono.mask = clock->mask;
-	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
 
 	tk->tkr_raw.clock = clock;
-	tk->tkr_raw.read = clock->read;
 	tk->tkr_raw.mask = clock->mask;
 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
 
@@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
 		now += timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
-					tkr->read(tkr->clock),
+					tk_clock_read(tkr),
 					tkr->cycle_last,
 					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
 	return cycles_at_suspend;
 }
 
+static struct clocksource dummy_clock = {
+	.read = dummy_clock_read,
+};
+
 /**
  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
  * @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
 	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	cycles_at_suspend = tkr->read(tkr->clock);
-	tkr_dummy.read = dummy_clock_read;
+	cycles_at_suspend = tk_clock_read(tkr);
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
 	tkr = &tk->tkr_raw;
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	tkr_dummy.read = dummy_clock_read;
+	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
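A note on the halt_fast_timekeeper() change above: since struct tk_read_base no longer carries its own read hook, the fast timekeepers are frozen across suspend by swapping in an entire dummy clocksource rather than just a dummy read function. A minimal userspace sketch of that freeze pattern follows; live_read, dummy_read, and ticks are illustrative stand-ins, while cycles_at_suspend and dummy_clock mirror the names in the patch.

	#include <stdio.h>

	struct clocksource {
		unsigned long long (*read)(struct clocksource *cs);
	};

	static unsigned long long ticks;	/* stand-in for real hardware */

	static unsigned long long live_read(struct clocksource *cs)
	{
		(void)cs;
		return ++ticks;			/* advances on every read */
	}

	static unsigned long long cycles_at_suspend;

	static unsigned long long dummy_read(struct clocksource *cs)
	{
		(void)cs;
		return cycles_at_suspend;	/* always replays the snapshot */
	}

	static struct clocksource live_clock  = { .read = live_read };
	static struct clocksource dummy_clock = { .read = dummy_read };

	int main(void)
	{
		struct clocksource *clock = &live_clock;

		printf("running: %llu %llu\n",
		       clock->read(clock), clock->read(clock));

		/* "halt": snapshot the counter, then route all further
		 * reads through the dummy clocksource, so time appears
		 * frozen at the moment of suspend. */
		cycles_at_suspend = clock->read(clock);
		clock = &dummy_clock;

		printf("halted:  %llu %llu\n",
		       clock->read(clock), clock->read(clock));
		return 0;
	}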
@@ -647,11 +669,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
  */
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
-	struct clocksource *clock = tk->tkr_mono.clock;
 	cycle_t cycle_now, delta;
 	s64 nsec;
 
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last = cycle_now;
@@ -1434,7 +1455,7 @@ void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk->tkr_mono.read(clock);
+	cycle_now = tk_clock_read(&tk->tkr_mono);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 num, max = ULLONG_MAX;
@@ -1829,7 +1850,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif
 