// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/time/clock.h"

#include "absl/base/attributes.h"
#include "absl/base/optimization.h"

#ifdef _WIN32
#include <windows.h>
#endif

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <ctime>
#include <limits>

#include "absl/base/internal/spinlock.h"
#include "absl/base/internal/unscaledcycleclock.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
Time Now() {
  // TODO(bww): Get a timespec instead so we don't have to divide.
  int64_t n = absl::GetCurrentTimeNanos();
  if (n >= 0) {
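    // The Duration representation stores its low word in quarter-nanosecond
    // ticks, which is why the nanosecond remainder below is multiplied by 4.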
    return time_internal::FromUnixDuration(
        time_internal::MakeDuration(n / 1000000000, n % 1000000000 * 4));
  }
  return time_internal::FromUnixDuration(absl::Nanoseconds(n));
}
ABSL_NAMESPACE_END
}  // namespace absl

// Decide if we should use the fast GetCurrentTimeNanos() algorithm
// based on the cyclecounter, otherwise just get the time directly
// from the OS on every call. This can be chosen at compile-time via
// -DABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS=[0|1]
#ifndef ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
#if ABSL_USE_UNSCALED_CYCLECLOCK
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 1
#else
#define ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS 0
#endif
#endif

#if defined(__APPLE__) || defined(_WIN32)
#include "absl/time/internal/get_current_time_chrono.inc"
#else
#include "absl/time/internal/get_current_time_posix.inc"
#endif

// Allows override by test.
#ifndef GET_CURRENT_TIME_NANOS_FROM_SYSTEM
#define GET_CURRENT_TIME_NANOS_FROM_SYSTEM() \
  ::absl::time_internal::GetCurrentTimeNanosFromSystem()
#endif

#if !ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS
namespace absl {
ABSL_NAMESPACE_BEGIN
int64_t GetCurrentTimeNanos() { return GET_CURRENT_TIME_NANOS_FROM_SYSTEM(); }
ABSL_NAMESPACE_END
}  // namespace absl
#else  // Use the cyclecounter-based implementation below.

// Allows override by test.
#ifndef GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW
#define GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW() \
  ::absl::time_internal::UnscaledCycleClockWrapperForGetCurrentTime::Now()
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
// This is a friend wrapper around UnscaledCycleClock::Now()
// (needed to access UnscaledCycleClock).
class UnscaledCycleClockWrapperForGetCurrentTime {
 public:
  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
};
}  // namespace time_internal

// uint64_t is used in this module to provide an extra bit in multiplications
// ---------------------------------------------------------------------
// An implementation of reader-writer locks that uses no atomic ops in the
// read case. This is a generalization of Lamport's method for reading a
// multiword clock. Increment a word on each write acquisition, using the
// low-order bit as a spinlock; the word is the high word of the "clock".
// Readers read the high word, then all other data, then the high word again,
// and repeat the read if the two reads of the high word yield different
// answers, or an odd value (either case suggests possible interference from
// a writer).
// Here we use a spinlock to ensure only one writer at a time, rather than
// spinning on the bottom bit of the word, to benefit from SpinLock
// spin-delay tuning.
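//
// An illustrative sketch of the protocol (the real reader is inlined in
// GetCurrentTimeNanos() below; the real writer is UpdateLastSample()):
//
//   // reader
//   uint64_t s0 = seq.load(std::memory_order_acquire);
//   ... relaxed loads of the shared data ...
//   std::atomic_thread_fence(std::memory_order_acquire);
//   uint64_t s1 = seq.load(std::memory_order_relaxed);
//   if (s0 == s1 && (s0 & 1) == 0) { /* snapshot is consistent */ }
//   else { /* a writer interfered; retry or take the slow path */ }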

// Acquire seqlock (*seq) and return the value to be written to unlock.
static inline uint64_t SeqAcquire(std::atomic<uint64_t> *seq) {
  uint64_t x = seq->fetch_add(1, std::memory_order_relaxed);

  // We put a release fence between update to *seq and writes to shared data.
  // Thus all stores to shared data are effectively release operations and
  // update to *seq above cannot be re-ordered past any of them. Note that
  // this barrier is not for the fetch_add above. A release barrier for the
  // fetch_add would be before it, not after.
  std::atomic_thread_fence(std::memory_order_release);

  return x + 2;  // original word plus 2
}

// Release seqlock (*seq) by writing x to it---a value previously returned by
// SeqAcquire.
static inline void SeqRelease(std::atomic<uint64_t> *seq, uint64_t x) {
  // The unlock store to *seq must have release ordering so that all
  // updates to shared data finish before this store.
  seq->store(x, std::memory_order_release);  // release lock for readers
}
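
// Illustrative writer usage of the pair above (UpdateLastSample() below is
// the real writer):
//   uint64_t lock_value = SeqAcquire(&seq);  // seq becomes odd; readers bail
//   ... relaxed stores to the shared data ...
//   SeqRelease(&seq, lock_value);            // seq becomes even again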

// ---------------------------------------------------------------------

// "nsscaled" is a unit of time equal to a (2**kScale)th of a nanosecond.
enum { kScale = 30 };
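
// For example, a cycle period of half a nanosecond is stored as
// nsscaled_per_cycle = (1 << kScale) / 2; multiplying a cycle count by that
// value and shifting right by kScale recovers whole nanoseconds.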

// The minimum interval between samples of the time base.
// We pick enough time to amortize the cost of the sample,
// to get a reasonably accurate cycle counter rate reading,
// and not so much that calculations will overflow 64-bits.
static const uint64_t kMinNSBetweenSamples = 2000 << 20;
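// (2000 << 20 is 2,097,152,000 ns, i.e. roughly 2.1 seconds.)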

// We require that kMinNSBetweenSamples shifted by kScale
// have at least a bit left over for 64-bit calculations.
static_assert(((kMinNSBetweenSamples << (kScale + 1)) >> (kScale + 1)) ==
                  kMinNSBetweenSamples,
              "cannot represent kMaxBetweenSamplesNSScaled");

// data from a sample of the kernel's time value
struct TimeSampleAtomic {
  std::atomic<uint64_t> raw_ns{0};              // raw kernel time
  std::atomic<uint64_t> base_ns{0};             // our estimate of time
  std::atomic<uint64_t> base_cycles{0};         // cycle counter reading
  std::atomic<uint64_t> nsscaled_per_cycle{0};  // cycle period
  // cycles before we'll sample again (a scaled reciprocal of the period,
  // to avoid a division on the fast path).
  std::atomic<uint64_t> min_cycles_per_sample{0};
};
// Same again, but with non-atomic types
struct TimeSample {
  uint64_t raw_ns = 0;                 // raw kernel time
  uint64_t base_ns = 0;                // our estimate of time
  uint64_t base_cycles = 0;            // cycle counter reading
  uint64_t nsscaled_per_cycle = 0;     // cycle period
  uint64_t min_cycles_per_sample = 0;  // approx cycles before next sample
};

struct ABSL_CACHELINE_ALIGNED TimeState {
  std::atomic<uint64_t> seq{0};
  TimeSampleAtomic last_sample;  // the last sample; under seq

  // The following counters are used only by the test code.
  int64_t stats_initializations{0};
  int64_t stats_reinitializations{0};
  int64_t stats_calibrations{0};
  int64_t stats_slow_paths{0};
  int64_t stats_fast_slow_paths{0};

  uint64_t last_now_cycles ABSL_GUARDED_BY(lock){0};

  // Used by GetCurrentTimeNanosFromKernel().
  // We try to read clock values at about the same time as the kernel clock.
  // This value gets adjusted up or down as an estimate of how long that
  // should take, so we can reject attempts that take unusually long.
  std::atomic<uint64_t> approx_syscall_time_in_cycles{10 * 1000};
  // Number of times in a row we've seen a kernel time call take substantially
  // less than approx_syscall_time_in_cycles.
  std::atomic<uint32_t> kernel_time_seen_smaller{0};

  // A reader-writer lock protecting the static locations below.
  // See SeqAcquire() and SeqRelease() above.
  absl::base_internal::SpinLock lock{absl::kConstInit,
                                     base_internal::SCHEDULE_KERNEL_ONLY};
};
ABSL_CONST_INIT static TimeState time_state{};

// Return the time in ns as told by the kernel interface. Place in *cycleclock
// the value of the cycleclock at about the time of the syscall.
// This call represents the time base that this module synchronizes to.
// Ensures that *cycleclock does not step back by up to (1 << 16) from
// last_cycleclock, to discard small backward counter steps. (Larger steps are
// assumed to be complete resyncs, which shouldn't happen. If they do, a full
// reinitialization of the outer algorithm should occur.)
static int64_t GetCurrentTimeNanosFromKernel(uint64_t last_cycleclock,
                                             uint64_t *cycleclock)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
  uint64_t local_approx_syscall_time_in_cycles =  // local copy
      time_state.approx_syscall_time_in_cycles.load(std::memory_order_relaxed);

  int64_t current_time_nanos_from_system;
  uint64_t before_cycles;
  uint64_t after_cycles;
  uint64_t elapsed_cycles;
  int loops = 0;
  do {
    before_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
    current_time_nanos_from_system = GET_CURRENT_TIME_NANOS_FROM_SYSTEM();
    after_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();
    // elapsed_cycles is unsigned, so is large on overflow
    elapsed_cycles = after_cycles - before_cycles;
    if (elapsed_cycles >= local_approx_syscall_time_in_cycles &&
        ++loops == 20) {  // clock changed frequencies?  Back off.
      loops = 0;
      if (local_approx_syscall_time_in_cycles < 1000 * 1000) {
        local_approx_syscall_time_in_cycles =
            (local_approx_syscall_time_in_cycles + 1) << 1;
      }
      time_state.approx_syscall_time_in_cycles.store(
          local_approx_syscall_time_in_cycles, std::memory_order_relaxed);
    }
  } while (elapsed_cycles >= local_approx_syscall_time_in_cycles ||
           last_cycleclock - after_cycles < (static_cast<uint64_t>(1) << 16));
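  // The second loop condition above retries while after_cycles lags
  // last_cycleclock by an unsigned difference of less than (1 << 16), i.e.
  // while the cycle counter appears to have taken a small backward step.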

  // Adjust approx_syscall_time_in_cycles to be within a factor of 2
  // of the typical time to execute one iteration of the loop above.
  if ((local_approx_syscall_time_in_cycles >> 1) < elapsed_cycles) {
    // measured time is no smaller than half current approximation
    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
  } else if (time_state.kernel_time_seen_smaller.fetch_add(
                 1, std::memory_order_relaxed) >= 3) {
    // smaller delays several times in a row; reduce approximation by 12.5%
    const uint64_t new_approximation =
        local_approx_syscall_time_in_cycles -
        (local_approx_syscall_time_in_cycles >> 3);
    time_state.approx_syscall_time_in_cycles.store(new_approximation,
                                                   std::memory_order_relaxed);
    time_state.kernel_time_seen_smaller.store(0, std::memory_order_relaxed);
  }

  *cycleclock = after_cycles;
  return current_time_nanos_from_system;
}

static int64_t GetCurrentTimeNanosSlowPath() ABSL_ATTRIBUTE_COLD;

// Read the contents of *atomic into *sample.
// Each field is read atomically, but to maintain atomicity between fields,
// the access must be done under a lock.
static void ReadTimeSampleAtomic(const struct TimeSampleAtomic *atomic,
                                 struct TimeSample *sample) {
  sample->base_ns = atomic->base_ns.load(std::memory_order_relaxed);
  sample->base_cycles = atomic->base_cycles.load(std::memory_order_relaxed);
  sample->nsscaled_per_cycle =
      atomic->nsscaled_per_cycle.load(std::memory_order_relaxed);
  sample->min_cycles_per_sample =
      atomic->min_cycles_per_sample.load(std::memory_order_relaxed);
  sample->raw_ns = atomic->raw_ns.load(std::memory_order_relaxed);
}

// Public routine.
// Algorithm: We wish to compute real time from a cycle counter. In normal
// operation, we construct a piecewise linear approximation to the kernel time
// source, using the cycle counter value. The start of each line segment is at
// the same point as the end of the last, but may have a different slope (that
// is, a different idea of the cycle counter frequency). Every couple of
// seconds, the kernel time source is sampled and compared with the current
// approximation. A new slope is chosen that, if followed for another couple
// of seconds, will correct the error at the current position. The information
// for a sample is in the "last_sample" struct. The linear approximation is
//   estimated_time = last_sample.base_ns +
//     last_sample.ns_per_cycle * (counter_reading - last_sample.base_cycles)
// (ns_per_cycle is actually stored in different units and scaled, to avoid
// overflow). The base_ns of the next linear approximation is the
// estimated_time using the last approximation; the base_cycles is the cycle
// counter value at that time; the ns_per_cycle is the number of ns per cycle
// measured since the last sample, but adjusted so that most of the difference
// between the estimated_time and the kernel time will be corrected by the
// estimated time to the next sample. In normal operation, this algorithm
// relies on:
// - the cycle counter and kernel time rates not changing a lot in a few
//   seconds.
// - the client calling into the code often compared to a couple of seconds, so
//   the time to the next correction can be estimated.
// Any time ns_per_cycle is not known, a major error is detected, or the
// assumption about frequent calls is violated, the implementation returns the
// kernel time. It records sufficient data that a linear approximation can
// resume a little later.
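//
// A worked (hypothetical) example of the interpolation: with
// base_ns = 1,000,000,000, a 3GHz counter calibrated so that
// nsscaled_per_cycle = (1 << kScale) / 3, and a reading taken
// 3,000,000,000 cycles past base_cycles, the estimate is
//   1,000,000,000 + ((3,000,000,000 * ((1 << kScale) / 3)) >> kScale)
// which is approximately 2,000,000,000 ns, i.e. one second later.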

int64_t GetCurrentTimeNanos() {
  // read the data from the "last_sample" struct (but don't need raw_ns yet)
  // The reads of "seq" and test of the values emulate a reader lock.
  uint64_t base_ns;
  uint64_t base_cycles;
  uint64_t nsscaled_per_cycle;
  uint64_t min_cycles_per_sample;
  uint64_t seq_read0;
  uint64_t seq_read1;

  // If we have enough information to interpolate, the value returned will be
  // derived from this cycleclock-derived time estimate. On some platforms
  // (POWER) the function to retrieve this value has enough complexity to
  // contribute to register pressure - reading it early before initializing
  // the other pieces of the calculation minimizes spill/restore instructions,
  // minimizing icache cost.
  uint64_t now_cycles = GET_CURRENT_TIME_NANOS_CYCLECLOCK_NOW();

  // Acquire pairs with the barrier in SeqRelease - if this load sees that
  // store, the shared-data reads necessarily see that SeqRelease's updates
  // to the same shared data.
  seq_read0 = time_state.seq.load(std::memory_order_acquire);

  base_ns = time_state.last_sample.base_ns.load(std::memory_order_relaxed);
  base_cycles =
      time_state.last_sample.base_cycles.load(std::memory_order_relaxed);
  nsscaled_per_cycle =
      time_state.last_sample.nsscaled_per_cycle.load(std::memory_order_relaxed);
  min_cycles_per_sample = time_state.last_sample.min_cycles_per_sample.load(
      std::memory_order_relaxed);

  // This acquire fence pairs with the release fence in SeqAcquire. Since it
  // is sequenced between reads of shared data and seq_read1, the reads of
  // shared data are effectively acquiring.
  std::atomic_thread_fence(std::memory_order_acquire);

  // The shared-data reads are effectively acquire ordered, and the
  // shared-data writes are effectively release ordered. Therefore if our
  // shared-data reads see any of a particular update's shared-data writes,
  // seq_read1 is guaranteed to see that update's SeqAcquire.
  seq_read1 = time_state.seq.load(std::memory_order_relaxed);

  // Fast path. Return if min_cycles_per_sample has not yet elapsed since the
  // last sample, and we read a consistent sample. The fast path activates
  // only when min_cycles_per_sample is non-zero, which happens when we get an
  // estimate for the cycle time. The predicate will fail if now_cycles <
  // base_cycles, or if some other thread is in the slow path.
  //
  // Since we now read now_cycles before base_ns, it is possible for now_cycles
  // to be less than base_cycles (if we were interrupted between those loads and
  // last_sample was updated). This is harmless, because delta_cycles will wrap
  // and report a time much much bigger than min_cycles_per_sample. In that case
  // we will take the slow path.
  uint64_t delta_cycles;
  if (seq_read0 == seq_read1 && (seq_read0 & 1) == 0 &&
      (delta_cycles = now_cycles - base_cycles) < min_cycles_per_sample) {
    return base_ns + ((delta_cycles * nsscaled_per_cycle) >> kScale);
  }
  return GetCurrentTimeNanosSlowPath();
}

// Return (a << kScale)/b.
// Zero is returned if b==0. Scaling is performed internally to
// preserve precision without overflow.
static uint64_t SafeDivideAndScale(uint64_t a, uint64_t b) {
  // Find maximum safe_shift so that
  //   0 <= safe_shift <= kScale and (a << safe_shift) does not overflow.
  int safe_shift = kScale;
  while (((a << safe_shift) >> safe_shift) != a) {
    safe_shift--;
  }
  uint64_t scaled_b = b >> (kScale - safe_shift);
  uint64_t quotient = 0;
  if (scaled_b != 0) {
    quotient = (a << safe_shift) / scaled_b;
  }
  return quotient;
}
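
// For instance, SafeDivideAndScale(1, 3) returns (1 << kScale) / 3. For a
// large a such as (1 << 60), safe_shift shrinks so that (a << safe_shift)
// still fits in 64 bits, trading away low-order precision in b instead.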

static uint64_t UpdateLastSample(
    uint64_t now_cycles, uint64_t now_ns, uint64_t delta_cycles,
    const struct TimeSample *sample) ABSL_ATTRIBUTE_COLD;

// The slow path of GetCurrentTimeNanos(). This is taken while gathering
// initial samples, when enough time has elapsed since the last sample, and if
// any other thread is writing to last_sample.
//
// Manually mark this 'noinline' to minimize stack frame size of the fast
// path. Without this, sometimes a compiler may inline this big block of code
// into the fast path. That causes lots of register spills and reloads that
// are unnecessary unless the slow path is taken.
//
// TODO(absl-team): Remove this attribute when our compiler is smart enough
// to do the right thing.
ABSL_ATTRIBUTE_NOINLINE
static int64_t GetCurrentTimeNanosSlowPath()
    ABSL_LOCKS_EXCLUDED(time_state.lock) {
  // Serialize access to slow-path. Fast-path readers are not blocked yet, and
  // code below must not modify last_sample until the seqlock is acquired.
  time_state.lock.Lock();

  // Sample the kernel time base. This is the definition of
  // "now" if we take the slow path.
  uint64_t now_cycles;
  uint64_t now_ns =
      GetCurrentTimeNanosFromKernel(time_state.last_now_cycles, &now_cycles);
  time_state.last_now_cycles = now_cycles;

  uint64_t estimated_base_ns;

  // ----------
  // Read the "last_sample" values again; this time holding the write lock.
  struct TimeSample sample;
  ReadTimeSampleAtomic(&time_state.last_sample, &sample);

  // ----------
  // Try running the fast path again; another thread may have updated the
  // sample between our run of the fast path and the sample we just read.
  uint64_t delta_cycles = now_cycles - sample.base_cycles;
  if (delta_cycles < sample.min_cycles_per_sample) {
    // Another thread updated the sample. This path does not take the seqlock
    // so that blocked readers can make progress without blocking new readers.
    estimated_base_ns = sample.base_ns +
                        ((delta_cycles * sample.nsscaled_per_cycle) >> kScale);
    time_state.stats_fast_slow_paths++;
  } else {
    estimated_base_ns =
        UpdateLastSample(now_cycles, now_ns, delta_cycles, &sample);
  }

  time_state.lock.Unlock();

  return estimated_base_ns;
}

// Main part of the algorithm. Locks out readers, updates the approximation
// using the new sample from the kernel, and stores the result in last_sample
// for readers. Returns the new estimated time.
static uint64_t UpdateLastSample(uint64_t now_cycles, uint64_t now_ns,
                                 uint64_t delta_cycles,
                                 const struct TimeSample *sample)
    ABSL_EXCLUSIVE_LOCKS_REQUIRED(time_state.lock) {
  uint64_t estimated_base_ns = now_ns;
  uint64_t lock_value =
      SeqAcquire(&time_state.seq);  // acquire seqlock to block readers

  // The 5s in the next if-statement limits the time for which we will trust
  // the cycle counter and our last sample to give a reasonable result.
  // Errors in the rate of the source clock can be multiplied by the ratio
  // between this limit and kMinNSBetweenSamples.
  if (sample->raw_ns == 0 ||  // no recent sample, or clock went backwards
      sample->raw_ns + static_cast<uint64_t>(5) * 1000 * 1000 * 1000 < now_ns ||
      now_ns < sample->raw_ns || now_cycles < sample->base_cycles) {
    // record this sample, and forget any previously known slope.
    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    time_state.last_sample.base_ns.store(estimated_base_ns,
                                         std::memory_order_relaxed);
    time_state.last_sample.base_cycles.store(now_cycles,
                                             std::memory_order_relaxed);
    time_state.last_sample.nsscaled_per_cycle.store(0,
                                                    std::memory_order_relaxed);
    time_state.last_sample.min_cycles_per_sample.store(
        0, std::memory_order_relaxed);
    time_state.stats_initializations++;
  } else if (sample->raw_ns + 500 * 1000 * 1000 < now_ns &&
             sample->base_cycles + 50 < now_cycles) {
    // Enough time has passed to compute the cycle time.
    if (sample->nsscaled_per_cycle != 0) {  // Have a cycle time estimate.
      // Compute time from counter reading, but avoiding overflow;
      // delta_cycles may be larger than on the fast path.
      uint64_t estimated_scaled_ns;
      int s = -1;
      do {
        s++;
        estimated_scaled_ns = (delta_cycles >> s) * sample->nsscaled_per_cycle;
      } while (estimated_scaled_ns / sample->nsscaled_per_cycle !=
               (delta_cycles >> s));
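      // The division check above detects overflow of the multiplication: if
      // (delta_cycles >> s) * nsscaled_per_cycle wrapped, dividing the
      // product by nsscaled_per_cycle cannot reproduce (delta_cycles >> s).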
      estimated_base_ns = sample->base_ns +
                          (estimated_scaled_ns >> (kScale - s));
    }

    // Compute the assumed cycle time kMinNSBetweenSamples ns into the future
    // assuming the cycle counter rate stays the same as the last interval.
    uint64_t ns = now_ns - sample->raw_ns;
    uint64_t measured_nsscaled_per_cycle = SafeDivideAndScale(ns, delta_cycles);

    uint64_t assumed_next_sample_delta_cycles =
        SafeDivideAndScale(kMinNSBetweenSamples, measured_nsscaled_per_cycle);

    int64_t diff_ns = now_ns - estimated_base_ns;  // estimate low by this much

    // We want to set nsscaled_per_cycle so that our estimate of the ns time
    // at the assumed cycle time is the assumed ns time.
    // That is, we want to set nsscaled_per_cycle so:
    //   kMinNSBetweenSamples + diff_ns ==
    //     (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
    // But we wish to damp oscillations, so instead correct only most
    // of our current error, by solving:
    //   kMinNSBetweenSamples + diff_ns - (diff_ns / 16) ==
    //     (assumed_next_sample_delta_cycles * nsscaled_per_cycle) >> kScale
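    // For example, with the estimate 16,000 ns behind the kernel time
    // (diff_ns == 16000), the new slope aims to regain 15,000 ns of the
    // error over the next sampling interval rather than all of it at once.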
    ns = kMinNSBetweenSamples + diff_ns - (diff_ns / 16);
    uint64_t new_nsscaled_per_cycle =
        SafeDivideAndScale(ns, assumed_next_sample_delta_cycles);
    if (new_nsscaled_per_cycle != 0 &&
        diff_ns < 100 * 1000 * 1000 && -diff_ns < 100 * 1000 * 1000) {
      // record the cycle time measurement
      time_state.last_sample.nsscaled_per_cycle.store(
          new_nsscaled_per_cycle, std::memory_order_relaxed);
      uint64_t new_min_cycles_per_sample =
          SafeDivideAndScale(kMinNSBetweenSamples, new_nsscaled_per_cycle);
      time_state.last_sample.min_cycles_per_sample.store(
          new_min_cycles_per_sample, std::memory_order_relaxed);
      time_state.stats_calibrations++;
    } else {  // something went wrong; forget the slope
      time_state.last_sample.nsscaled_per_cycle.store(
          0, std::memory_order_relaxed);
      time_state.last_sample.min_cycles_per_sample.store(
          0, std::memory_order_relaxed);
      estimated_base_ns = now_ns;
      time_state.stats_reinitializations++;
    }
    time_state.last_sample.raw_ns.store(now_ns, std::memory_order_relaxed);
    time_state.last_sample.base_ns.store(estimated_base_ns,
                                         std::memory_order_relaxed);
    time_state.last_sample.base_cycles.store(now_cycles,
                                             std::memory_order_relaxed);
  } else {
    // have a sample, but no slope; waiting for enough time for a calibration
    time_state.stats_slow_paths++;
  }

  SeqRelease(&time_state.seq, lock_value);  // release the readers

  return estimated_base_ns;
}
ABSL_NAMESPACE_END
}  // namespace absl
#endif  // ABSL_USE_CYCLECLOCK_FOR_GET_CURRENT_TIME_NANOS

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {

// Returns the maximum duration that SleepOnce() can sleep for.
constexpr absl::Duration MaxSleep() {
#ifdef _WIN32
  // Windows Sleep() takes unsigned long argument in milliseconds.
  return absl::Milliseconds(
      std::numeric_limits<unsigned long>::max());  // NOLINT(runtime/int)
#else
  return absl::Seconds(std::numeric_limits<time_t>::max());
#endif
}

// Sleeps for the given duration.
// REQUIRES: to_sleep <= MaxSleep().
void SleepOnce(absl::Duration to_sleep) {
#ifdef _WIN32
  Sleep(static_cast<DWORD>(to_sleep / absl::Milliseconds(1)));
#else
  struct timespec sleep_time = absl::ToTimespec(to_sleep);
  while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) {
    // Ignore signals and wait for the full interval to elapse.
  }
#endif
}

}  // namespace
ABSL_NAMESPACE_END
}  // namespace absl

extern "C" {

ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSleepFor)(
    absl::Duration duration) {
  while (duration > absl::ZeroDuration()) {
    absl::Duration to_sleep = std::min(duration, absl::MaxSleep());
    absl::SleepOnce(to_sleep);
    duration -= to_sleep;
  }
}

}  // extern "C"
586 | |