22 #define binuptime(_bt) _Timecounter_Binuptime(_bt) 23 #define nanouptime(_tsp) _Timecounter_Nanouptime(_tsp) 24 #define microuptime(_tvp) _Timecounter_Microuptime(_tvp) 25 #define bintime(_bt) _Timecounter_Bintime(_bt) 26 #define nanotime(_tsp) _Timecounter_Nanotime(_tsp) 27 #define microtime(_tvp) _Timecounter_Microtime(_tvp) 28 #define getbinuptime(_bt) _Timecounter_Getbinuptime(_bt) 29 #define getnanouptime(_tsp) _Timecounter_Getnanouptime(_tsp) 30 #define getmicrouptime(_tvp) _Timecounter_Getmicrouptime(_tvp) 31 #define getbintime(_bt) _Timecounter_Getbintime(_bt) 32 #define getnanotime(_tsp) _Timecounter_Getnanotime(_tsp) 33 #define getmicrotime(_tvp) _Timecounter_Getmicrotime(_tvp) 34 #define getboottime(_tvp) _Timecounter_Getboottime(_tvp) 35 #define getboottimebin(_bt) _Timecounter_Getboottimebin(_bt) 36 #define tc_init _Timecounter_Install 37 #define timecounter _Timecounter 38 #define time_second _Timecounter_Time_second 39 #define time_uptime _Timecounter_Time_uptime 46 #include <sys/cdefs.h> 47 __FBSDID(
"$FreeBSD: head/sys/kern/kern_tc.c 324528 2017-10-11 11:03:11Z kib $");
49 #include "opt_compat.h" 51 #include "opt_ffclock.h" 53 #include <sys/param.h> 55 #include <sys/kernel.h> 56 #include <sys/limits.h> 58 #include <sys/mutex.h> 61 #include <sys/sleepqueue.h> 62 #include <sys/sysctl.h> 63 #include <sys/syslog.h> 64 #include <sys/systm.h> 66 #include <sys/timeffc.h> 67 #include <sys/timepps.h> 68 #include <sys/timetc.h> 69 #include <sys/timex.h> 78 #define _Timecounter_Release(lock_context) \ 79 _ISR_lock_Release_and_ISR_enable(&_Timecounter_Lock, lock_context) 80 #define hz rtems_clock_get_ticks_per_second() 82 #define bcopy(x, y, z) memcpy(y, x, z); 87 return x ?
sizeof(x) * 8 - __builtin_clz(x) : 0;
89 #define fls(x) builtin_fls(x) 91 #define ntp_update_second(a, b) do { (void) a; (void) b; } while (0) 94 atomic_thread_fence_acq(
void)
97 _Atomic_Fence(ATOMIC_ORDER_ACQUIRE);
101 atomic_thread_fence_rel(
void)
104 _Atomic_Fence(ATOMIC_ORDER_RELEASE);
108 atomic_load_acq_int(Atomic_Uint *i)
111 return (_Atomic_Load_uint(i, ATOMIC_ORDER_ACQUIRE));
115 atomic_store_rel_int(Atomic_Uint *i, u_int val)
118 _Atomic_Store_uint(i, val, ATOMIC_ORDER_RELEASE);
128 #define LARGE_STEP 200 150 dummy_get_timecount, 0, ~0u, 1000000,
"dummy", -1000000
152 dummy_get_timecount, ~(uint32_t)0, 1000000,
"dummy", -1000000
159 int64_t th_adjustment;
161 uint32_t th_offset_count;
162 struct bintime th_offset;
163 struct bintime th_bintime;
164 struct timeval th_microtime;
165 struct timespec th_nanotime;
166 struct bintime th_boottime;
171 Atomic_Uint th_generation;
176 #if defined(RTEMS_SMP) 183 .th_counter = &dummy_timecounter,
184 .th_scale = (uint64_t)-1 / 1000000,
185 .th_offset = { .sec = 1 },
193 #if defined(RTEMS_SMP) 203 static struct timecounter *timecounters = &dummy_timecounter;
205 int tc_min_ticktock_freq = 1;
209 volatile time_t time_second = 1;
210 volatile time_t time_uptime = 1;
213 volatile int32_t time_uptime = 1;
217 static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS);
218 SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD,
219 NULL, 0, sysctl_kern_boottime,
"S,timeval",
"System boottime");
221 SYSCTL_NODE(_kern, OID_AUTO,
timecounter, CTLFLAG_RW, 0,
"");
222 static SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0,
"");
224 static int timestepwarnings;
225 SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
226 ×tepwarnings, 0,
"Log time steps");
228 struct bintime bt_timethreshold;
229 struct bintime bt_tickthreshold;
230 sbintime_t sbt_timethreshold;
231 sbintime_t sbt_tickthreshold;
232 struct bintime tc_tick_bt;
233 sbintime_t tc_tick_sbt;
235 int tc_timepercentage = TC_DEFAULTPERC;
236 static int sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS);
237 SYSCTL_PROC(_kern_timecounter, OID_AUTO, alloweddeviation,
238 CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, 0,
239 sysctl_kern_timecounter_adjprecision,
"I",
240 "Allowed time interval deviation in percents");
242 volatile int rtc_generation = 1;
244 static int tc_chosen;
247 static void tc_windup(
struct bintime *new_boottimebin);
249 static void cpu_tick_calibrate(
int);
251 static void _Timecounter_Windup(
struct bintime *new_boottimebin,
255 void dtrace_getnanotime(
struct timespec *tsp);
259 sysctl_kern_boottime(SYSCTL_HANDLER_ARGS)
261 struct timeval boottime;
263 getboottime(&boottime);
269 if (req->flags & SCTL_MASK32) {
270 tv[0] = boottime.tv_sec;
271 tv[1] = boottime.tv_usec;
272 return (SYSCTL_OUT(req, tv,
sizeof(tv)));
276 return (SYSCTL_OUT(req, &boottime,
sizeof(boottime)));
280 sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS)
285 ncount = tc->tc_get_timecount(tc);
286 return (sysctl_handle_int(oidp, &ncount, 0, req));
290 sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS)
295 freq = tc->tc_frequency;
296 return (sysctl_handle_64(oidp, &freq, 0, req));
304 static __inline uint32_t
310 return ((tc->tc_get_timecount(tc) - th->th_offset_count) &
311 tc->tc_counter_mask);
322 fbclock_binuptime(
struct bintime *bt)
329 gen = atomic_load_acq_int(&th->th_generation);
331 bintime_addx(bt, th->th_scale * tc_delta(th));
332 atomic_thread_fence_acq();
333 }
while (gen == 0 || gen != th->th_generation);
337 fbclock_nanouptime(
struct timespec *tsp)
341 fbclock_binuptime(&bt);
342 bintime2timespec(&bt, tsp);
346 fbclock_microuptime(
struct timeval *tvp)
350 fbclock_binuptime(&bt);
351 bintime2timeval(&bt, tvp);
355 fbclock_bintime(
struct bintime *bt)
362 gen = atomic_load_acq_int(&th->th_generation);
363 *bt = th->th_bintime;
364 bintime_addx(bt, th->th_scale * tc_delta(th));
365 atomic_thread_fence_acq();
366 }
while (gen == 0 || gen != th->th_generation);
370 fbclock_nanotime(
struct timespec *tsp)
374 fbclock_bintime(&bt);
375 bintime2timespec(&bt, tsp);
379 fbclock_microtime(
struct timeval *tvp)
383 fbclock_bintime(&bt);
384 bintime2timeval(&bt, tvp);
388 fbclock_getbinuptime(
struct bintime *bt)
395 gen = atomic_load_acq_int(&th->th_generation);
397 atomic_thread_fence_acq();
398 }
while (gen == 0 || gen != th->th_generation);
402 fbclock_getnanouptime(
struct timespec *tsp)
409 gen = atomic_load_acq_int(&th->th_generation);
410 bintime2timespec(&th->th_offset, tsp);
411 atomic_thread_fence_acq();
412 }
while (gen == 0 || gen != th->th_generation);
416 fbclock_getmicrouptime(
struct timeval *tvp)
423 gen = atomic_load_acq_int(&th->th_generation);
424 bintime2timeval(&th->th_offset, tvp);
425 atomic_thread_fence_acq();
426 }
while (gen == 0 || gen != th->th_generation);
430 fbclock_getbintime(
struct bintime *bt)
437 gen = atomic_load_acq_int(&th->th_generation);
438 *bt = th->th_bintime;
439 atomic_thread_fence_acq();
440 }
while (gen == 0 || gen != th->th_generation);
444 fbclock_getnanotime(
struct timespec *tsp)
451 gen = atomic_load_acq_int(&th->th_generation);
452 *tsp = th->th_nanotime;
453 atomic_thread_fence_acq();
454 }
while (gen == 0 || gen != th->th_generation);
458 fbclock_getmicrotime(
struct timeval *tvp)
465 gen = atomic_load_acq_int(&th->th_generation);
466 *tvp = th->th_microtime;
467 atomic_thread_fence_acq();
468 }
while (gen == 0 || gen != th->th_generation);
472 binuptime(
struct bintime *bt)
479 gen = atomic_load_acq_int(&th->th_generation);
481 bintime_addx(bt, th->th_scale * tc_delta(th));
482 atomic_thread_fence_acq();
483 }
while (gen == 0 || gen != th->th_generation);
495 gen = atomic_load_acq_int(&th->th_generation);
496 sbt = bttosbt(th->th_offset);
497 sbt += (th->th_scale * tc_delta(th)) >> 32;
498 atomic_thread_fence_acq();
499 }
while (gen == 0 || gen != th->th_generation);
506 nanouptime(
struct timespec *tsp)
511 bintime2timespec(&bt, tsp);
515 microuptime(
struct timeval *tvp)
520 bintime2timeval(&bt, tvp);
524 bintime(
struct bintime *bt)
531 gen = atomic_load_acq_int(&th->th_generation);
532 *bt = th->th_bintime;
533 bintime_addx(bt, th->th_scale * tc_delta(th));
534 atomic_thread_fence_acq();
535 }
while (gen == 0 || gen != th->th_generation);
539 nanotime(
struct timespec *tsp)
544 bintime2timespec(&bt, tsp);
548 microtime(
struct timeval *tvp)
553 bintime2timeval(&bt, tvp);
557 getbinuptime(
struct bintime *bt)
564 gen = atomic_load_acq_int(&th->th_generation);
566 atomic_thread_fence_acq();
567 }
while (gen == 0 || gen != th->th_generation);
571 getnanouptime(
struct timespec *tsp)
578 gen = atomic_load_acq_int(&th->th_generation);
579 bintime2timespec(&th->th_offset, tsp);
580 atomic_thread_fence_acq();
581 }
while (gen == 0 || gen != th->th_generation);
585 getmicrouptime(
struct timeval *tvp)
592 gen = atomic_load_acq_int(&th->th_generation);
593 bintime2timeval(&th->th_offset, tvp);
594 atomic_thread_fence_acq();
595 }
while (gen == 0 || gen != th->th_generation);
599 getbintime(
struct bintime *bt)
606 gen = atomic_load_acq_int(&th->th_generation);
607 *bt = th->th_bintime;
608 atomic_thread_fence_acq();
609 }
while (gen == 0 || gen != th->th_generation);
613 getnanotime(
struct timespec *tsp)
620 gen = atomic_load_acq_int(&th->th_generation);
621 *tsp = th->th_nanotime;
622 atomic_thread_fence_acq();
623 }
while (gen == 0 || gen != th->th_generation);
627 getmicrotime(
struct timeval *tvp)
634 gen = atomic_load_acq_int(&th->th_generation);
635 *tvp = th->th_microtime;
636 atomic_thread_fence_acq();
637 }
while (gen == 0 || gen != th->th_generation);
642 getboottime(
struct timeval *boottime)
644 struct bintime boottimebin;
646 getboottimebin(&boottimebin);
647 bintime2timeval(&boottimebin, boottime);
651 getboottimebin(
struct bintime *boottimebin)
658 gen = atomic_load_acq_int(&th->th_generation);
659 *boottimebin = th->th_boottime;
660 atomic_thread_fence_acq();
661 }
while (gen == 0 || gen != th->th_generation);
674 struct bintime ffclock_boottime;
675 uint32_t ffclock_status;
676 int8_t ffclock_updated;
677 struct mtx ffclock_mtx;
681 struct bintime tick_time;
682 struct bintime tick_time_lerp;
683 ffcounter tick_ffcount;
684 uint64_t period_lerp;
685 volatile uint8_t gen;
686 struct fftimehands *next;
689 #define NUM_ELEMENTS(x) (sizeof(x) / sizeof(*x)) 691 static struct fftimehands ffth[10];
692 static struct fftimehands *
volatile fftimehands = ffth;
697 struct fftimehands *cur;
698 struct fftimehands *last;
700 memset(ffth, 0,
sizeof(ffth));
702 last = ffth + NUM_ELEMENTS(ffth) - 1;
703 for (cur = ffth; cur < last; cur++)
708 ffclock_status = FFCLOCK_STA_UNSYNC;
709 mtx_init(&ffclock_mtx,
"ffclock lock", NULL, MTX_DEF);
720 ffclock_reset_clock(
struct timespec *ts)
728 timespec2bintime(ts, &ffclock_boottime);
729 timespec2bintime(ts, &(cest.update_time));
730 ffclock_read_counter(&cest.update_ffcount);
731 cest.leapsec_next = 0;
732 cest.period = ((1ULL << 63) / tc->tc_frequency) << 1;
735 cest.status = FFCLOCK_STA_UNSYNC;
736 cest.leapsec_total = 0;
739 mtx_lock(&ffclock_mtx);
741 ffclock_updated = INT8_MAX;
742 mtx_unlock(&ffclock_mtx);
744 printf(
"ffclock reset: %s (%llu Hz), time = %ld.%09lu\n", tc->tc_name,
745 (
unsigned long long)tc->tc_frequency, (
long)ts->tv_sec,
746 (
unsigned long)ts->tv_nsec);
757 ffclock_convert_delta(ffcounter ffdelta, uint64_t period,
struct bintime *bt)
760 ffcounter delta, delta_max;
762 delta_max = (1ULL << (8 *
sizeof(
unsigned int))) - 1;
765 if (ffdelta > delta_max)
771 bintime_mul(&bt2, (
unsigned int)delta);
772 bintime_add(bt, &bt2);
774 }
while (ffdelta > 0);
786 ffclock_windup(
unsigned int delta)
789 struct fftimehands *ffth;
790 struct bintime bt, gap_lerp;
793 unsigned int polling;
794 uint8_t forward_jump, ogen;
801 ffth = fftimehands->next;
806 ffdelta = (ffcounter)delta;
807 ffth->period_lerp = fftimehands->period_lerp;
809 ffth->tick_time = fftimehands->tick_time;
810 ffclock_convert_delta(ffdelta, cest->period, &bt);
811 bintime_add(&ffth->tick_time, &bt);
813 ffth->tick_time_lerp = fftimehands->tick_time_lerp;
814 ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt);
815 bintime_add(&ffth->tick_time_lerp, &bt);
817 ffth->tick_ffcount = fftimehands->tick_ffcount + ffdelta;
824 if (ffclock_updated == 0) {
825 ffdelta = ffth->tick_ffcount - cest->update_ffcount;
826 ffclock_convert_delta(ffdelta, cest->period, &bt);
827 if (bt.sec > 2 * FFCLOCK_SKM_SCALE)
828 ffclock_status |= FFCLOCK_STA_UNSYNC;
840 if (ffclock_updated > 0) {
842 ffdelta = ffth->tick_ffcount - cest->update_ffcount;
843 ffth->tick_time = cest->update_time;
844 ffclock_convert_delta(ffdelta, cest->period, &bt);
845 bintime_add(&ffth->tick_time, &bt);
848 if (ffclock_updated == INT8_MAX)
849 ffth->tick_time_lerp = ffth->tick_time;
851 if (bintime_cmp(&ffth->tick_time, &ffth->tick_time_lerp, >))
856 bintime_clear(&gap_lerp);
858 gap_lerp = ffth->tick_time;
859 bintime_sub(&gap_lerp, &ffth->tick_time_lerp);
861 gap_lerp = ffth->tick_time_lerp;
862 bintime_sub(&gap_lerp, &ffth->tick_time);
875 if (((ffclock_status & FFCLOCK_STA_UNSYNC) == FFCLOCK_STA_UNSYNC) &&
876 ((cest->status & FFCLOCK_STA_UNSYNC) == 0) &&
877 ((cest->status & FFCLOCK_STA_WARMUP) == FFCLOCK_STA_WARMUP)) {
879 bintime_add(&ffclock_boottime, &gap_lerp);
881 bintime_sub(&ffclock_boottime, &gap_lerp);
882 ffth->tick_time_lerp = ffth->tick_time;
883 bintime_clear(&gap_lerp);
886 ffclock_status = cest->status;
887 ffth->period_lerp = cest->period;
894 if (bintime_isset(&gap_lerp)) {
895 ffdelta = cest->update_ffcount;
896 ffdelta -= fftimehands->cest.update_ffcount;
897 ffclock_convert_delta(ffdelta, cest->period, &bt);
900 bt.frac = 5000000 * (uint64_t)18446744073LL;
901 bintime_mul(&bt, polling);
902 if (bintime_cmp(&gap_lerp, &bt, >))
907 if (gap_lerp.sec > 0) {
909 frac /= ffdelta / gap_lerp.sec;
911 frac += gap_lerp.frac / ffdelta;
914 ffth->period_lerp += frac;
916 ffth->period_lerp -= frac;
940 struct fftimehands *ffth;
946 ffth = fftimehands->next;
952 cest->period = ((1ULL << 63) / tc->tc_frequency ) << 1;
955 cest->status |= FFCLOCK_STA_UNSYNC;
957 ffth->tick_ffcount = fftimehands->tick_ffcount;
958 ffth->tick_time_lerp = fftimehands->tick_time_lerp;
959 ffth->tick_time = fftimehands->tick_time;
960 ffth->period_lerp = cest->period;
975 ffclock_last_tick(ffcounter *ffcount,
struct bintime *bt, uint32_t flags)
977 struct fftimehands *ffth;
987 if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP)
988 *bt = ffth->tick_time_lerp;
990 *bt = ffth->tick_time;
991 *ffcount = ffth->tick_ffcount;
992 }
while (gen == 0 || gen != ffth->gen);
1003 ffclock_convert_abs(ffcounter ffcount,
struct bintime *bt, uint32_t flags)
1005 struct fftimehands *ffth;
1017 if (ffcount > ffth->tick_ffcount)
1018 ffdelta = ffcount - ffth->tick_ffcount;
1020 ffdelta = ffth->tick_ffcount - ffcount;
1022 if ((flags & FFCLOCK_LERP) == FFCLOCK_LERP) {
1023 *bt = ffth->tick_time_lerp;
1024 ffclock_convert_delta(ffdelta, ffth->period_lerp, &bt2);
1026 *bt = ffth->tick_time;
1027 ffclock_convert_delta(ffdelta, ffth->cest.period, &bt2);
1030 if (ffcount > ffth->tick_ffcount)
1031 bintime_add(bt, &bt2);
1033 bintime_sub(bt, &bt2);
1034 }
while (gen == 0 || gen != ffth->gen);
1044 ffclock_convert_diff(ffcounter ffdelta,
struct bintime *bt)
1046 struct fftimehands *ffth;
1053 ffclock_convert_delta(ffdelta, ffth->cest.period, bt);
1054 }
while (gen == 0 || gen != ffth->gen);
1061 ffclock_read_counter(ffcounter *ffcount)
1064 struct fftimehands *ffth;
1065 unsigned int gen, delta;
1073 gen = atomic_load_acq_int(&th->th_generation);
1075 delta = tc_delta(th);
1076 *ffcount = ffth->tick_ffcount;
1077 atomic_thread_fence_acq();
1078 }
while (gen == 0 || gen != th->th_generation);
1084 binuptime(
struct bintime *bt)
1087 binuptime_fromclock(bt, sysclock_active);
1091 nanouptime(
struct timespec *tsp)
1094 nanouptime_fromclock(tsp, sysclock_active);
1098 microuptime(
struct timeval *tvp)
1101 microuptime_fromclock(tvp, sysclock_active);
1105 bintime(
struct bintime *bt)
1108 bintime_fromclock(bt, sysclock_active);
1112 nanotime(
struct timespec *tsp)
1115 nanotime_fromclock(tsp, sysclock_active);
1119 microtime(
struct timeval *tvp)
1122 microtime_fromclock(tvp, sysclock_active);
1126 getbinuptime(
struct bintime *bt)
1129 getbinuptime_fromclock(bt, sysclock_active);
1133 getnanouptime(
struct timespec *tsp)
1136 getnanouptime_fromclock(tsp, sysclock_active);
1140 getmicrouptime(
struct timeval *tvp)
1143 getmicrouptime_fromclock(tvp, sysclock_active);
1147 getbintime(
struct bintime *bt)
1150 getbintime_fromclock(bt, sysclock_active);
1154 getnanotime(
struct timespec *tsp)
1157 getnanotime_fromclock(tsp, sysclock_active);
1161 getmicrotime(
struct timeval *tvp)
1164 getmicrouptime_fromclock(tvp, sysclock_active);
1176 dtrace_getnanotime(
struct timespec *tsp)
1183 gen = atomic_load_acq_int(&th->th_generation);
1184 *tsp = th->th_nanotime;
1185 atomic_thread_fence_acq();
1186 }
while (gen == 0 || gen != th->th_generation);
1195 int sysclock_active = SYSCLOCK_FBCK;
1199 extern int time_status;
1200 extern long time_esterror;
1208 sysclock_getsnapshot(
struct sysclock_snap *clock_snap,
int fast)
1210 struct fbclock_info *fbi;
1213 unsigned int delta, gen;
1216 struct fftimehands *ffth;
1217 struct ffclock_info *ffi;
1220 ffi = &clock_snap->ff_info;
1223 fbi = &clock_snap->fb_info;
1228 gen = atomic_load_acq_int(&th->th_generation);
1229 fbi->th_scale = th->th_scale;
1230 fbi->tick_time = th->th_offset;
1233 ffi->tick_time = ffth->tick_time_lerp;
1234 ffi->tick_time_lerp = ffth->tick_time_lerp;
1235 ffi->period = ffth->cest.period;
1236 ffi->period_lerp = ffth->period_lerp;
1237 clock_snap->ffcount = ffth->tick_ffcount;
1241 delta = tc_delta(th);
1242 atomic_thread_fence_acq();
1243 }
while (gen == 0 || gen != th->th_generation);
1245 clock_snap->delta = delta;
1247 clock_snap->sysclock_active = sysclock_active;
1251 clock_snap->fb_info.status = time_status;
1253 bt.sec = time_esterror / 1000000;
1254 bt.frac = ((time_esterror - bt.sec) * 1000000) *
1255 (uint64_t)18446744073709ULL;
1256 clock_snap->fb_info.error = bt;
1260 clock_snap->ffcount += delta;
1263 ffi->leapsec_adjustment = cest.leapsec_total;
1264 if (clock_snap->ffcount > cest.leapsec_next)
1265 ffi->leapsec_adjustment -= cest.leapsec;
1268 clock_snap->ff_info.status = cest.status;
1269 ffcount = clock_snap->ffcount - cest.update_ffcount;
1270 ffclock_convert_delta(ffcount, cest.period, &bt);
1272 bintime_mul(&bt, cest.errb_rate * (uint64_t)18446744073709ULL);
1274 bintime_addx(&bt, cest.errb_abs * (uint64_t)18446744073ULL);
1275 clock_snap->ff_info.error = bt;
1284 sysclock_snap2bintime(
struct sysclock_snap *cs,
struct bintime *bt,
1285 int whichclock, uint32_t flags)
1287 struct bintime boottimebin;
1293 switch (whichclock) {
1295 *bt = cs->fb_info.tick_time;
1299 bintime_addx(bt, cs->fb_info.th_scale * cs->delta);
1301 if ((flags & FBCLOCK_UPTIME) == 0) {
1302 getboottimebin(&boottimebin);
1303 bintime_add(bt, &boottimebin);
1308 if (flags & FFCLOCK_LERP) {
1309 *bt = cs->ff_info.tick_time_lerp;
1310 period = cs->ff_info.period_lerp;
1312 *bt = cs->ff_info.tick_time;
1313 period = cs->ff_info.period;
1317 if (cs->delta > 0) {
1318 ffclock_convert_delta(cs->delta, period, &bt2);
1319 bintime_add(bt, &bt2);
1323 if (flags & FFCLOCK_LEAPSEC)
1324 bt->sec -= cs->ff_info.leapsec_adjustment;
1327 if (flags & FFCLOCK_UPTIME)
1328 bintime_sub(bt, &ffclock_boottime);
1348 struct sysctl_oid *tc_root;
1350 u = tc->tc_frequency / tc->tc_counter_mask;
1354 if (u > hz && tc->tc_quality >= 0) {
1355 tc->tc_quality = -2000;
1357 printf(
"Timecounter \"%s\" frequency %ju Hz",
1358 tc->tc_name, (uintmax_t)tc->tc_frequency);
1359 printf(
" -- Insufficient hz, needs at least %u\n", u);
1361 }
else if (tc->tc_quality >= 0 || bootverbose) {
1362 printf(
"Timecounter \"%s\" frequency %ju Hz quality %d\n",
1363 tc->tc_name, (uintmax_t)tc->tc_frequency,
1367 tc->tc_next = timecounters;
1372 tc_root = SYSCTL_ADD_NODE_WITH_LABEL(NULL,
1373 SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name,
1374 CTLFLAG_RW, 0,
"timecounter description",
"timecounter");
1375 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1376 "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0,
1377 "mask for implemented bits");
1378 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1379 "counter", CTLTYPE_UINT | CTLFLAG_RD, tc,
sizeof(*tc),
1380 sysctl_kern_timecounter_get,
"IU",
"current timecounter value");
1381 SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1382 "frequency", CTLTYPE_U64 | CTLFLAG_RD, tc,
sizeof(*tc),
1383 sysctl_kern_timecounter_freq,
"QU",
"timecounter frequency");
1384 SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO,
1385 "quality", CTLFLAG_RD, &(tc->tc_quality), 0,
1386 "goodness of time counter");
1395 if (tc->tc_quality < 0)
1404 (void)tc->tc_get_timecount(tc);
1405 (void)tc->tc_get_timecount(tc);
1416 tc_getfrequency(
void)
1419 return (
timehands->th_counter->tc_frequency);
1423 sleeping_on_old_rtc(
struct thread *td)
1435 if (td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation) {
1442 static struct mtx tc_setclock_mtx;
1443 MTX_SYSINIT(tc_setclock_init, &tc_setclock_mtx,
"tcsetc", MTX_SPIN);
1452 tc_setclock(
struct timespec *ts)
1459 struct timespec tbef, taft;
1461 struct bintime bt, bt2;
1464 timespec2bintime(ts, &bt);
1466 mtx_lock_spin(&tc_setclock_mtx);
1467 cpu_tick_calibrate(1);
1472 bintime_sub(&bt, &bt2);
1477 mtx_unlock_spin(&tc_setclock_mtx);
1480 atomic_add_rel_int(&rtc_generation, 2);
1481 sleepq_chains_remove_matching(sleeping_on_old_rtc);
1482 if (timestepwarnings) {
1485 "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
1486 (intmax_t)tbef.tv_sec, tbef.tv_nsec,
1487 (intmax_t)taft.tv_sec, taft.tv_nsec,
1488 (intmax_t)ts->tv_sec, ts->tv_nsec);
1491 _Timecounter_Windup(&bt, lock_context);
1501 tc_windup(
struct bintime *new_boottimebin)
1507 _Timecounter_Windup(new_boottimebin, &lock_context);
1511 _Timecounter_Windup(
struct bintime *new_boottimebin,
1518 uint32_t delta, ncount, ogen;
1532 #if defined(RTEMS_SMP) 1537 ogen = th->th_generation;
1538 th->th_generation = 0;
1539 atomic_thread_fence_rel();
1540 #if defined(RTEMS_SMP) 1541 bcopy(tho, th, offsetof(
struct timehands, th_generation));
1543 if (new_boottimebin != NULL)
1544 th->th_boottime = *new_boottimebin;
1551 delta = tc_delta(th);
1557 ffclock_windup(delta);
1559 th->th_offset_count += delta;
1560 th->th_offset_count &= th->th_counter->tc_counter_mask;
1561 while (delta > th->th_counter->tc_frequency) {
1563 delta -= th->th_counter->tc_frequency;
1564 th->th_offset.sec++;
1566 if ((delta > th->th_counter->tc_frequency / 2) &&
1567 (th->th_scale * delta < ((uint64_t)1 << 63))) {
1569 th->th_offset.sec++;
1571 bintime_addx(&th->th_offset, th->th_scale * delta);
1582 if (tho->th_counter->tc_poll_pps)
1583 tho->th_counter->tc_poll_pps(tho->th_counter);
1596 bintime_add(&bt, &th->th_boottime);
1597 i = bt.sec - tho->th_microtime.tv_sec;
1600 for (; i > 0; i--) {
1602 ntp_update_second(&th->th_adjustment, &bt.sec);
1604 th->th_boottime.sec += bt.sec - t;
1607 th->th_bintime = bt;
1608 bintime2timeval(&bt, &th->th_microtime);
1609 bintime2timespec(&bt, &th->th_nanotime);
1615 if ((
timecounter->tc_flags & TC_FLAGS_C2STOP) != 0)
1616 cpu_disable_c2_sleep++;
1617 if ((th->th_counter->tc_flags & TC_FLAGS_C2STOP) != 0)
1618 cpu_disable_c2_sleep--;
1622 th->th_offset_count = ncount;
1624 tc_min_ticktock_freq = max(1,
timecounter->tc_frequency /
1625 (((uint64_t)
timecounter->tc_counter_mask + 1) / 3));
1628 ffclock_change_tc(th);
1655 scale = (uint64_t)1 << 63;
1656 scale += (th->th_adjustment / 1024) * 2199;
1657 scale /= th->th_counter->tc_frequency;
1658 th->th_scale = scale * 2;
1666 atomic_store_rel_int(&th->th_generation, ogen);
1670 switch (sysclock_active) {
1673 time_second = th->th_microtime.tv_sec;
1674 time_uptime = th->th_offset.sec;
1678 time_second = fftimehands->tick_time_lerp.sec;
1679 time_uptime = fftimehands->tick_time_lerp.sec - ffclock_boottime.sec;
1684 #if defined(RTEMS_SMP) 1688 timekeep_push_vdso();
1698 sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS)
1705 strlcpy(newname, tc->tc_name,
sizeof(newname));
1707 error = sysctl_handle_string(oidp, &newname[0],
sizeof(newname), req);
1708 if (error != 0 || req->newptr == NULL)
1712 if (strcmp(newname, tc->tc_name) == 0)
1714 for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) {
1715 if (strcmp(newname, newtc->tc_name) != 0)
1719 (void)newtc->tc_get_timecount(newtc);
1720 (void)newtc->tc_get_timecount(newtc);
1737 SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW,
1738 0, 0, sysctl_kern_timecounter_hardware,
"A",
1739 "Timecounter hardware selected");
1744 sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS)
1750 sbuf_new_for_sysctl(&sb, NULL, 0, req);
1751 for (tc = timecounters; tc != NULL; tc = tc->tc_next) {
1752 if (tc != timecounters)
1753 sbuf_putc(&sb,
' ');
1754 sbuf_printf(&sb,
"%s(%d)", tc->tc_name, tc->tc_quality);
1756 error = sbuf_finish(&sb);
1761 SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD,
1762 0, 0, sysctl_kern_timecounter_choice,
"A",
"Timecounter hardware detected");
1775 abi_aware(
struct pps_state *pps,
int vers)
1778 return ((pps->kcmode & KCMODE_ABIFLAG) && pps->driver_abi >= vers);
1785 pps_seq_t aseq, cseq;
1788 if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC)
1798 if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) {
1799 if (fapi->timeout.tv_sec == -1)
1802 tv.tv_sec = fapi->timeout.tv_sec;
1803 tv.tv_usec = fapi->timeout.tv_nsec / 1000;
1806 aseq = pps->ppsinfo.assert_sequence;
1807 cseq = pps->ppsinfo.clear_sequence;
1808 while (aseq == pps->ppsinfo.assert_sequence &&
1809 cseq == pps->ppsinfo.clear_sequence) {
1810 if (abi_aware(pps, 1) && pps->driver_mtx != NULL) {
1811 if (pps->flags & PPSFLAG_MTX_SPIN) {
1812 err = msleep_spin(pps, pps->driver_mtx,
1815 err = msleep(pps, pps->driver_mtx, PCATCH,
1819 err = tsleep(pps, PCATCH,
"ppsfch", timo);
1821 if (err == EWOULDBLOCK) {
1822 if (fapi->timeout.tv_sec == -1) {
1827 }
else if (err != 0) {
1833 pps->ppsinfo.current_mode = pps->ppsparam.mode;
1834 fapi->pps_info_buf = pps->ppsinfo;
1840 pps_ioctl(u_long cmd, caddr_t data,
struct pps_state *pps)
1851 KASSERT(pps != NULL, (
"NULL pps pointer in pps_ioctl"));
1853 case PPS_IOC_CREATE:
1855 case PPS_IOC_DESTROY:
1857 case PPS_IOC_SETPARAMS:
1859 if (app->mode & ~pps->ppscap)
1863 if ((app->mode & PPS_TSCLK_MASK) == PPS_TSCLK_MASK)
1866 pps->ppsparam = *app;
1868 case PPS_IOC_GETPARAMS:
1870 *app = pps->ppsparam;
1871 app->api_version = PPS_API_VERS_1;
1873 case PPS_IOC_GETCAP:
1874 *(
int*)data = pps->ppscap;
1878 return (pps_fetch(fapi, pps));
1880 case PPS_IOC_FETCH_FFCOUNTER:
1882 if (fapi_ffc->tsformat && fapi_ffc->tsformat !=
1885 if (fapi_ffc->timeout.tv_sec || fapi_ffc->timeout.tv_nsec)
1886 return (EOPNOTSUPP);
1887 pps->ppsinfo_ffc.current_mode = pps->ppsparam.mode;
1888 fapi_ffc->pps_info_buf_ffc = pps->ppsinfo_ffc;
1890 switch (pps->ppsparam.mode & PPS_TSCLK_MASK) {
1891 case PPS_TSCLK_FBCK:
1892 fapi_ffc->pps_info_buf_ffc.assert_timestamp =
1893 pps->ppsinfo.assert_timestamp;
1894 fapi_ffc->pps_info_buf_ffc.clear_timestamp =
1895 pps->ppsinfo.clear_timestamp;
1897 case PPS_TSCLK_FFWD:
1904 case PPS_IOC_KCBIND:
1908 if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC)
1910 if (kapi->kernel_consumer != PPS_KC_HARDPPS)
1912 if (kapi->edge & ~pps->ppscap)
1914 pps->kcmode = (kapi->edge & KCMODE_EDGEMASK) |
1915 (pps->kcmode & KCMODE_ABIFLAG);
1918 return (EOPNOTSUPP);
1926 pps_init(
struct pps_state *pps)
1928 pps->ppscap |= PPS_TSFMT_TSPEC | PPS_CANWAIT;
1929 if (pps->ppscap & PPS_CAPTUREASSERT)
1930 pps->ppscap |= PPS_OFFSETASSERT;
1931 if (pps->ppscap & PPS_CAPTURECLEAR)
1932 pps->ppscap |= PPS_OFFSETCLEAR;
1934 pps->ppscap |= PPS_TSCLK_MASK;
1936 pps->kcmode &= ~KCMODE_ABIFLAG;
1940 pps_init_abi(
struct pps_state *pps)
1944 if (pps->driver_abi > 0) {
1945 pps->kcmode |= KCMODE_ABIFLAG;
1946 pps->kernel_abi = PPS_ABI_VERSION;
1951 pps_capture(
struct pps_state *pps)
1955 KASSERT(pps != NULL, (
"NULL pps pointer in pps_capture"));
1957 pps->capgen = atomic_load_acq_int(&th->th_generation);
1960 pps->capffth = fftimehands;
1962 pps->capcount = th->th_counter->tc_get_timecount(th->th_counter);
1963 atomic_thread_fence_acq();
1964 if (pps->capgen != th->th_generation)
1969 pps_event(
struct pps_state *pps,
int event)
1972 struct timespec ts, *tsp, *osp;
1973 uint32_t tcount, *pcount;
1977 struct timespec *tsp_ffc;
1978 pps_seq_t *pseq_ffc;
1985 KASSERT(pps != NULL, (
"NULL pps pointer in pps_event"));
1987 if ((event & pps->ppsparam.mode) == 0)
1990 if (pps->capgen == 0 || pps->capgen !=
1991 atomic_load_acq_int(&pps->capth->th_generation))
1995 if (event == PPS_CAPTUREASSERT) {
1996 tsp = &pps->ppsinfo.assert_timestamp;
1997 osp = &pps->ppsparam.assert_offset;
1998 foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
2000 fhard = pps->kcmode & PPS_CAPTUREASSERT;
2002 pcount = &pps->ppscount[0];
2003 pseq = &pps->ppsinfo.assert_sequence;
2005 ffcount = &pps->ppsinfo_ffc.assert_ffcount;
2006 tsp_ffc = &pps->ppsinfo_ffc.assert_timestamp;
2007 pseq_ffc = &pps->ppsinfo_ffc.assert_sequence;
2010 tsp = &pps->ppsinfo.clear_timestamp;
2011 osp = &pps->ppsparam.clear_offset;
2012 foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
2014 fhard = pps->kcmode & PPS_CAPTURECLEAR;
2016 pcount = &pps->ppscount[1];
2017 pseq = &pps->ppsinfo.clear_sequence;
2019 ffcount = &pps->ppsinfo_ffc.clear_ffcount;
2020 tsp_ffc = &pps->ppsinfo_ffc.clear_timestamp;
2021 pseq_ffc = &pps->ppsinfo_ffc.clear_sequence;
2029 if (pps->ppstc != pps->capth->th_counter) {
2030 pps->ppstc = pps->capth->th_counter;
2031 *pcount = pps->capcount;
2032 pps->ppscount[2] = pps->capcount;
2037 tcount = pps->capcount - pps->capth->th_offset_count;
2038 tcount &= pps->capth->th_counter->tc_counter_mask;
2039 bt = pps->capth->th_bintime;
2040 bintime_addx(&bt, pps->capth->th_scale * tcount);
2041 bintime2timespec(&bt, &ts);
2044 atomic_thread_fence_acq();
2045 if (pps->capgen != pps->capth->th_generation)
2048 *pcount = pps->capcount;
2053 timespecadd(tsp, osp, tsp);
2054 if (tsp->tv_nsec < 0) {
2055 tsp->tv_nsec += 1000000000;
2061 *ffcount = pps->capffth->tick_ffcount + tcount;
2062 bt = pps->capffth->tick_time;
2063 ffclock_convert_delta(tcount, pps->capffth->cest.period, &bt);
2064 bintime_add(&bt, &pps->capffth->tick_time);
2065 bintime2timespec(&bt, &ts);
2079 tcount = pps->capcount - pps->ppscount[2];
2080 pps->ppscount[2] = pps->capcount;
2081 tcount &= pps->capth->th_counter->tc_counter_mask;
2082 scale = (uint64_t)1 << 63;
2083 scale /= pps->capth->th_counter->tc_frequency;
2087 bintime_addx(&bt, scale * tcount);
2088 bintime2timespec(&bt, &ts);
2089 hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec);
2109 SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0,
2110 "Approximate number of hardclock ticks in a millisecond");
2115 tc_ticktock(
int cnt)
2119 if (mtx_trylock_spin(&tc_setclock_mtx)) {
2121 if (count >= tc_tick) {
2125 mtx_unlock_spin(&tc_setclock_mtx);
2134 if (_Per_CPU_Is_boot_processor(cpu_self)) {
2150 ogen = th->th_generation;
2151 th->th_offset_count = offset;
2152 bintime_addx(&th->th_offset, th->th_scale * delta);
2155 bintime_add(&bt, &th->th_boottime);
2157 th->th_bintime = bt;
2158 bintime2timeval(&bt, &th->th_microtime);
2159 bintime2timespec(&bt, &th->th_nanotime);
2167 th->th_generation = ogen;
2170 time_second = th->th_microtime.tv_sec;
2171 time_uptime = th->th_offset.sec;
2180 static void __inline
2181 tc_adjprecision(
void)
2185 if (tc_timepercentage > 0) {
2186 t = (99 + tc_timepercentage) / tc_timepercentage;
2187 tc_precexp = fls(t + (t >> 1)) - 1;
2188 FREQ2BT(hz / tc_tick, &bt_timethreshold);
2189 FREQ2BT(hz, &bt_tickthreshold);
2190 bintime_shift(&bt_timethreshold, tc_precexp);
2191 bintime_shift(&bt_tickthreshold, tc_precexp);
2194 bt_timethreshold.sec = INT_MAX;
2195 bt_timethreshold.frac = ~(uint64_t)0;
2196 bt_tickthreshold = bt_timethreshold;
2198 sbt_timethreshold = bttosbt(bt_timethreshold);
2199 sbt_tickthreshold = bttosbt(bt_tickthreshold);
2205 sysctl_kern_timecounter_adjprecision(SYSCTL_HANDLER_ARGS)
2209 val = tc_timepercentage;
2210 error = sysctl_handle_int(oidp, &val, 0, req);
2211 if (error != 0 || req->newptr == NULL)
2213 tc_timepercentage = val;
/*
 * inittimecounter(): one-time timecounter initialization run at
 * SI_SUB_CLOCKS. Derives tc_tick (windups per second target of ~1 kHz),
 * precomputes the per-tick bintime/sbintime constants, announces the
 * windup period, and performs the initial windup under the clock-set
 * spin mutex.
 * NOTE(review): several lines (tc_adjprecision/tc_windup calls, bounds
 * clamping of tc_tick) are elided from this excerpt.
 */
2222 inittimecounter(
void *dummy)
/* Round hz/1000 to the nearest integer number of ticks per windup. */
2236 tc_tick = (hz + 500) / 1000;
2240 FREQ2BT(hz, &tick_bt);
2241 tick_sbt = bttosbt(tick_bt);
2242 tick_rate = hz / tc_tick;
2243 FREQ2BT(tick_rate, &tc_tick_bt);
2244 tc_tick_sbt = bttosbt(tc_tick_bt);
/* Windup period in microseconds, for the boot message below. */
2245 p = (tc_tick * 1000000) / hz;
2246 printf(
"Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000);
2254 mtx_lock_spin(&tc_setclock_mtx);
2256 mtx_unlock_spin(&tc_setclock_mtx);
/* Register inittimecounter() to run during the SI_SUB_CLOCKS boot stage. */
2259 SYSINIT(
timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL);
/* Nonzero if the CPU tick counter frequency may vary (e.g. with DVFS). */
2263 static int cpu_tick_variable;
/* Best-known CPU tick frequency in Hz; grows monotonically via calibration. */
2264 static uint64_t cpu_tick_frequency;
/* Per-CPU accumulated tick base, extended past counter wraparounds. */
2266 static DPCPU_DEFINE(uint64_t, tc_cpu_ticks_base);
/* Per-CPU last raw counter read, used to detect wraparound. */
2267 static DPCPU_DEFINE(
unsigned, tc_cpu_ticks_last);
/*
 * Fragment of tc_cpu_ticks(): the fallback cpu_ticks implementation that
 * synthesizes a 64-bit monotonic tick count from the active timecounter.
 * Reads the masked counter; when it is seen to have wrapped, the visible
 * line extends the per-CPU base by one full counter period.
 * NOTE(review): the function signature and the wrap-detection comparison
 * are elided from this excerpt.
 */
2273 uint64_t res, *base;
2277 base = DPCPU_PTR(tc_cpu_ticks_base);
2278 last = DPCPU_PTR(tc_cpu_ticks_last);
2280 u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
/* Counter wrapped since the last read: credit one full period. */
2282 *base += (uint64_t)tc->tc_counter_mask + 1;
/*
 * cpu_tick_calibration(): periodic hook that re-runs the CPU tick
 * frequency calibration once every 16 seconds of uptime (when the low
 * four bits of time_uptime are zero), at most once per second.
 */
2290 cpu_tick_calibration(
void)
2292 static time_t last_calib;
2294 if (time_uptime != last_calib && !(time_uptime & 0xf)) {
2295 cpu_tick_calibrate(0);
2296 last_calib = time_uptime;
/*
 * cpu_tick_calibrate(): estimate the CPU tick frequency by dividing the
 * tick delta by the uptime delta since the previous call. Only runs when
 * the tick source is variable-frequency. The frequency estimate is
 * monotonically increasing: it is only replaced when a larger value is
 * observed, so transient clock-down states cannot shrink it.
 * NOTE(review): excerpt elides the `reset` handling, the actual division
 * using `divi`, and the printf argument list.
 */
2309 cpu_tick_calibrate(
int reset)
2311 static uint64_t c_last;
2312 uint64_t c_this, c_delta;
2313 static struct bintime t_last;
2314 struct bintime t_this, t_delta;
/* Fixed-frequency tickers never need recalibration. */
2324 if (!cpu_tick_variable)
2327 getbinuptime(&t_this);
2328 c_this = cpu_ticks();
/* t_last.sec == 0 means "first call": only sample, don't compute. */
2329 if (t_last.sec != 0) {
2330 c_delta = c_this - c_last;
2332 bintime_sub(&t_delta, &t_last);
/* Elapsed time folded into a 20-bit fixed-point divisor. */
2340 divi = t_delta.sec << 20;
2341 divi |= t_delta.frac >> (64 - 20);
/* Keep only the largest frequency estimate ever seen. */
2344 if (c_delta > cpu_tick_frequency) {
2345 if (0 && bootverbose)
2346 printf(
"cpu_tick increased to %ju Hz\n",
2348 cpu_tick_frequency = c_delta;
/*
 * set_cputicker(): install a CPU tick source. A NULL func (branch body
 * visible at 2360) falls back to the timecounter-based tc_cpu_ticks;
 * otherwise the supplied function, its frequency, and whether that
 * frequency can vary are recorded.
 * NOTE(review): the NULL check and the func assignment are elided from
 * this excerpt.
 */
2356 set_cputicker(cpu_tick_f *func, uint64_t freq,
unsigned var)
2360 cpu_ticks = tc_cpu_ticks;
2362 cpu_tick_frequency = freq;
2363 cpu_tick_variable = var;
/*
 * Fragment of cpu_tickrate(): returns the CPU tick frequency in Hz --
 * the active timecounter's frequency when the fallback ticker is in
 * use, otherwise the calibrated/registered cpu_tick_frequency.
 */
2372 if (cpu_ticks == tc_cpu_ticks)
2373 return (tc_getfrequency());
2374 return (cpu_tick_frequency);
/*
 * cputick2usec(): convert a CPU tick count to microseconds. Three
 * cascading ranges trade precision for overflow safety: very large tick
 * counts divide first, mid-range counts scale by 1000 before dividing,
 * and small counts multiply by 1e6 first for maximum precision. The
 * magic thresholds are ~2^64/1000 and ~2^64/1e6 expressed in ticks.
 */
2388 cputick2usec(uint64_t tick)
2391 if (tick > 18446744073709551LL)
2392 return (tick / (cpu_tickrate() / 1000000LL));
2393 else if (tick > 18446744073709LL)
2394 return ((tick * 1000LL) / (cpu_tickrate() / 1000LL));
2396 return ((tick * 1000000LL) / cpu_tickrate());
/* Default CPU ticker: the timecounter-derived fallback implementation. */
2399 cpu_tick_f *cpu_ticks = tc_cpu_ticks;
/* Whether userspace may use the vDSO fast gettime path (default on). */
2403 static int vdso_th_enable = 1;
/*
 * Sysctl handler toggling vdso_th_enable. The new value is only stored
 * after sysctl_handle_int succeeds on a write request (the error/newptr
 * check between 2410 and 2413 is elided from this excerpt).
 */
2405 sysctl_fast_gettime(SYSCTL_HANDLER_ARGS)
2407 int old_vdso_th_enable, error;
2409 old_vdso_th_enable = vdso_th_enable;
2410 error = sysctl_handle_int(oidp, &old_vdso_th_enable, 0, req);
2413 vdso_th_enable = old_vdso_th_enable;
/* Read-write sysctl kern.timecounter.fast_gettime -> sysctl_fast_gettime. */
2416 SYSCTL_PROC(_kern_timecounter, OID_AUTO, fast_gettime,
2417 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2418 NULL, 0, sysctl_fast_gettime,
"I",
"Enable fast time of day");
/*
 * tc_fill_vdso_timehands(): copy the current timehands snapshot into the
 * shared vDSO timehands page so userspace can compute the time without a
 * syscall. A counter driver may veto or augment the fill via its
 * tc_fill_vdso_timehands hook; the global vdso_th_enable switch can
 * disable the fast path entirely.
 * NOTE(review): the timehands selection, hook argument list, and return
 * statement are elided from this excerpt.
 */
2421 tc_fill_vdso_timehands(
struct vdso_timehands *vdso_th)
2427 vdso_th->th_scale = th->th_scale;
2428 vdso_th->th_offset_count = th->th_offset_count;
2429 vdso_th->th_counter_mask = th->th_counter->tc_counter_mask;
2430 vdso_th->th_offset = th->th_offset;
2431 vdso_th->th_boottime = th->th_boottime;
2432 if (th->th_counter->tc_fill_vdso_timehands != NULL) {
2433 enabled = th->th_counter->tc_fill_vdso_timehands(vdso_th,
/* Administrative kill switch overrides whatever the driver said. */
2437 if (!vdso_th_enable)
/*
 * tc_fill_vdso_timehands32(): 32-bit-compat variant of the vDSO fill,
 * compiled only under COMPAT_FREEBSD32. 64-bit fields are stored through
 * casts into the two-word th_scale / frac arrays of the 32-bit layout.
 * NOTE(review): the uint64_t* casts into uint32_t[2] fields rely on the
 * vdso_timehands32 layout guaranteeing suitable alignment -- confirm
 * against sys/vdso.h; strict-aliasing is sidestepped by that contract.
 * Hook invocation and return are elided from this excerpt.
 */
2443 #ifdef COMPAT_FREEBSD32 2445 tc_fill_vdso_timehands32(
struct vdso_timehands32 *vdso_th32)
2451 *(uint64_t *)&vdso_th32->th_scale[0] = th->th_scale;
2452 vdso_th32->th_offset_count = th->th_offset_count;
2453 vdso_th32->th_counter_mask = th->th_counter->tc_counter_mask;
2454 vdso_th32->th_offset.sec = th->th_offset.sec;
2455 *(uint64_t *)&vdso_th32->th_offset.frac[0] = th->th_offset.frac;
2456 vdso_th32->th_boottime.sec = th->th_boottime.sec;
2457 *(uint64_t *)&vdso_th32->th_boottime.frac[0] = th->th_boottime.frac;
2458 if (th->th_counter->tc_fill_vdso_timehands32 != NULL) {
2459 enabled = th->th_counter->tc_fill_vdso_timehands32(vdso_th32,
/* Same administrative kill switch as the native fill path. */
2463 if (!vdso_th_enable)
void _Timecounter_Tick_simple(uint32_t delta, uint32_t offset, ISR_lock_Context *lock_context)
Performs a simple timecounter tick.
Inlined Routines in the Watchdog Handler.
#define _Timecounter_Release(lock_context)
Releases the timecounter lock.
#define _Timecounter_Acquire(lock_context)
Lock to protect the timecounter mechanic.
SuperCore SMP Support API.
void _Timecounter_Tick(void)
Performs a timecounter tick.
Timecounter Implementation.
void _Timecounter_Set_clock(const struct bintime *_bt, ISR_lock_Context *lock_context)
Sets the timecounter clock to the given value.
This header file defines the RTEMS Classic API.
#define ISR_LOCK_DEFINE(_qualifier, _designator, _name)
Defines an ISR lock variable.
Local ISR lock context for acquire and release pairs.
sbintime_t _Timecounter_Sbinuptime(void)
Returns the uptime in the sbintime_t format.
#define TOD_SECONDS_1970_THROUGH_1988
The number of seconds from 00:00:00 1 January 1970 through 00:00:00 1 January 1988.
void _Watchdog_Tick(struct Per_CPU_Control *cpu)
Performs a watchdog tick.