#include <rtems/test.h>

typedef T_interrupt_test_state (*T_interrupt_test_handler)(void *);

#define T_INTERRUPT_SAMPLE_COUNT 8

/* Context members (fragment) */
    uint_fast32_t one_tick_busy;
    void (*prepare)(void *);
    void (*action)(void *);
    T_interrupt_test_state (*interrupt)(void *);
    void (*blocked)(void *);

/* Sort the sampled clock times by their delta (fragment) */
    for (i = 1; i < n; ++i) {
        for (j = 0; j < n - i; ++j) {
            if (ct[j].d > ct[j + 1].d) {
                /* ... swap ct[j] and ct[j + 1] ... */
            }
        }
    }

/* Determine a time stamp close to a clock tick boundary (fragment) */
T_interrupt_time_close_to_tick(void)
{
    /* ... */
    for (i = 0; i < n; ++i) {
        /* ... */
        ct[i].t = sbttons(t);
    }

    for (i = 1; i < n; ++i) {
        /* ... */
        d = (ct[i].t - ct[1].t) % ns_per_tick;

        if (d > ns_per_tick / 2) {
            /* ... */
        }
        /* ... */
    }

    T_interrupt_sort(&ct[1], n - 1);
    return ct[1 + (n - 1) / 2].t;
}

/* Interrupt dispatch: call the user handler and publish the state it returns (fragment) */
    T_interrupt_test_state state;
    unsigned int expected;

    state = (*ctx->interrupt)(ctx->arg);

    expected = T_INTERRUPT_TEST_ACTION;
    _Atomic_Compare_exchange_uint(&ctx->state, &expected,
        state, ATOMIC_ORDER_RELAXED, ATOMIC_ORDER_RELAXED);
/* One-time initialization: record a reference tick time and calibrate the busy loop (fragment) */
    ctx->t0 = T_interrupt_time_close_to_tick();
    ctx->one_tick_busy = T_get_one_clock_tick_busy();

/* Default handlers used until a test installs its own */
static T_interrupt_test_state
T_interrupt_continue(void *arg)
{
    return T_INTERRUPT_TEST_CONTINUE;
}

static void
T_interrupt_do_nothing(void *arg)
{
}

/* Per-CPU job handler: invoke the user blocked handler (fragment) */
static void
T_interrupt_blocked(void *arg)
{
    /* ... */
    (*ctx->blocked)(ctx->arg);
}

/* Static initialization of the test instance, its per-CPU job, and the
   thread switch user extension (fragments) */
    .interrupt = T_interrupt_continue,
    .blocked = T_interrupt_do_nothing,

    .context = &T_interrupt_instance.job_context

    .handler = T_interrupt_blocked,
    .arg = &T_interrupt_instance

    .thread_switch = T_interrupt_thread_switch
/* Atomically change the state from expected_state to desired_state (public helper) */
T_interrupt_test_state
T_interrupt_test_change_state(T_interrupt_test_state expected_state,
    T_interrupt_test_state desired_state)
{
    /* ... */
    unsigned int expected;

    ctx = &T_interrupt_instance;
    expected = expected_state;
    _Atomic_Compare_exchange_uint(&ctx->state, &expected,
        desired_state, ATOMIC_ORDER_RELAXED, ATOMIC_ORDER_RELAXED);
    /* ... */
}

T_interrupt_test_state
T_interrupt_test_get_state(void)
{
    /* ... */
    ctx = &T_interrupt_instance;
    return _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);
}

/* Spin until an interrupt handler moves the state away from T_INTERRUPT_TEST_ACTION */
void
T_interrupt_test_busy_wait_for_interrupt(void)
{
    /* ... */
    ctx = &T_interrupt_instance;

    do {
        state = _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);
    } while (state == T_INTERRUPT_TEST_ACTION);
}
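The three helpers above are the state interface available to test code. The following is a minimal sketch of how a test's action and interrupt handlers could use them; the handler names test_action() and test_interrupt() are hypothetical and only illustrate the call pattern under the state values visible in this listing.

/* Hypothetical user handlers; names are illustrative only. */
static void
test_action(void *arg)
{
    (void)arg;

    /* Reach the code path the interrupt shall hit, then spin until an
       interrupt handler has moved the state away from
       T_INTERRUPT_TEST_ACTION. */
    T_interrupt_test_busy_wait_for_interrupt();
}

static T_interrupt_test_state
test_interrupt(void *arg)
{
    (void)arg;

    /* Ignore interrupts which fire while no action is in flight. */
    if (T_interrupt_test_get_state() != T_INTERRUPT_TEST_ACTION) {
        return T_INTERRUPT_TEST_CONTINUE;
    }

    /* The interrupt hit the action window; end the test iteration. */
    return T_INTERRUPT_TEST_DONE;
}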
/* Thread switch extension: react when the thread under test blocks (fragment) */
    ctx = &T_interrupt_instance;

    if (ctx->self == executing) {
        T_interrupt_test_state state;

        state = _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);

        if (state != T_INTERRUPT_TEST_INITIAL) {
            /* ... */
            cpu_self = _Per_CPU_Get();
            /* ... */
            (*ctx->blocked)(ctx->arg);
            /* ... */
        }
    }

/* Test setup: validate and install the configured handlers (fragment) */
    T_quiet_assert_not_null(config->action);
    T_quiet_assert_not_null(config->interrupt);
    ctx = &T_interrupt_instance;
    /* ... */
    ctx->interrupt = config->interrupt;

    if (config->blocked != NULL) {
        ctx->blocked = config->blocked;
    }
    /* ... */
    T_interrupt_init_once(ctx);
    /* ... */
    T_interrupt_watchdog_insert(ctx);

/* Fixture teardown: restore the default handlers and remove the watchdog */
static void
T_interrupt_teardown(void *arg)
{
    /* ... */
    ctx->interrupt = T_interrupt_continue;
    ctx->blocked = T_interrupt_do_nothing;
    T_interrupt_watchdog_remove(ctx);
}

static const T_fixture T_interrupt_fixture = {
    .teardown = T_interrupt_teardown,
    .initial_context = &T_interrupt_instance
};
/* Run the interrupt timing test (fragment) */
T_interrupt_test_state
T_interrupt_test(const T_interrupt_test_config *config, void *arg)
{
    uint_fast32_t lower_bound[T_INTERRUPT_SAMPLE_COUNT];
    uint_fast32_t upper_bound[T_INTERRUPT_SAMPLE_COUNT];
    uint_fast32_t lower_sum;
    uint_fast32_t upper_sum;
    /* ... */

    ctx = T_interrupt_setup(config, arg);
    T_push_fixture(&ctx->node, &T_interrupt_fixture);
    /* ... */

    /* Start with the full range [0, one_tick_busy] for every sample */
    upper_sum = T_INTERRUPT_SAMPLE_COUNT * ctx->one_tick_busy;

    for (sample = 0; sample < T_INTERRUPT_SAMPLE_COUNT; ++sample) {
        lower_bound[sample] = 0;
        upper_bound[sample] = ctx->one_tick_busy;
    }
    /* ... */

    for (iter = 0; iter < config->max_iteration_count; ++iter) {
        T_interrupt_test_state state;
        /* ... */

        if (config->prepare != NULL) {
            (*config->prepare)(arg);
        }

        /* Busy time: midpoint of the averaged lower and upper bounds */
        busy = (lower_sum + upper_sum) /
            (2 * T_INTERRUPT_SAMPLE_COUNT);

        /* Place t a quarter tick past a tick boundary relative to t0 */
        d = (t - ctx->t0) % ns_per_tick;
        t += ns_per_tick / 4 - d;

        if (d > ns_per_tick / 8) {
            /* ... */
        }
        /* ... */

        _Atomic_Store_uint(&ctx->state, T_INTERRUPT_TEST_ACTION,
            ATOMIC_ORDER_RELAXED);
        /* ... */
        (*config->action)(arg);
        /* ... */
        state = _Atomic_Exchange_uint(&ctx->state,
            T_INTERRUPT_TEST_INITIAL, ATOMIC_ORDER_RELAXED);

        if (state == T_INTERRUPT_TEST_DONE) {
            break;
        }
        /* ... */

        if (state == T_INTERRUPT_TEST_EARLY) {
            /* Interrupt came too early: shrink the upper bound */
            upper_sum -= upper_bound[sample];
            /* ... */
            upper_bound[sample] = busy;
            /* ... */
            lower = lower_bound[sample];
            /* ... */
            lower_bound[sample] = lower - delta;
            /* ... */
            sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
        } else if (state == T_INTERRUPT_TEST_LATE) {
            /* Interrupt came too late: raise the lower bound */
            lower_sum -= lower_bound[sample];
            /* ... */
            lower_bound[sample] = busy;
            /* ... */
            upper = upper_bound[sample];
            delta = (upper + 31) / 32;
            /* ... */
            upper_bound[sample] = upper + delta;
            /* ... */
            sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
        }
    }
    /* ... */

    if (iter == config->max_iteration_count) {
        return T_INTERRUPT_TEST_TIMEOUT;
    }

    return T_INTERRUPT_TEST_DONE;
}
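Putting the pieces together, a caller passes a configuration with at least an action and an interrupt handler and checks the returned state. The sketch below assumes the public configuration type T_interrupt_test_config and the test-case macros from <rtems/test.h>, and reuses the hypothetical handlers from the previous sketch; the iteration count is an arbitrary example value.

/* Minimal caller sketch; reuses the hypothetical handlers above. */
static const T_interrupt_test_config test_config = {
    .action = test_action,
    .interrupt = test_interrupt,
    .max_iteration_count = 10000
};

T_TEST_CASE(InterruptTiming)
{
    T_interrupt_test_state state;

    /* Run the calibration loop until the interrupt hits the action window,
       or give up after max_iteration_count iterations. */
    state = T_interrupt_test(&test_config, NULL);
    T_eq_int(state, T_INTERRUPT_TEST_DONE);
}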
Referenced declarations:

int64_t Timestamp_Control
Per_CPU_Job_handler handler - The job handler.
uint32_t Watchdog_Interval - Type is used to specify the length of intervals.
#define _ISR_Local_disable(_level) - Disables interrupts on this processor.
Inlined Routines in the Watchdog Handler.
The control block used to manage each watchdog timer.
const uint32_t _Watchdog_Nanoseconds_per_tick - The watchdog nanoseconds per tick.
User Extension Handler API.
Helpers for Manipulating Timestamps.
static __inline__ struct _Thread_Control *_Thread_Get_executing(void) - Returns the thread control block of the executing thread.
#define RTEMS_CONTAINER_OF(_m, _type, _member_name) - Returns the pointer to the container of a specified member pointer.
#define SMP_MESSAGE_PERFORM_JOBS - SMP message to perform per-processor jobs.
static __inline__ Per_CPU_Control *_Watchdog_Get_CPU(const Watchdog_Control *the_watchdog) - Gets the watchdog's cpu.
void _User_extensions_Add_set(User_extensions_Control *extension) - Adds a user extension.
void _User_extensions_Remove_set(User_extensions_Control *extension) - Removes a user extension.
void _Per_CPU_Add_job(Per_CPU_Control *cpu, Per_CPU_Job *job) - Adds the job to the tail of the processing list of the specified processor.
#define _ISR_Local_enable(_level) - Enables interrupts on this processor.
Context for per-processor jobs.
#define WATCHDOG_INITIALIZER(routine) - Watchdog initializer for static initialization.
static __inline__ uint64_t _Watchdog_Per_CPU_insert_ticks(Watchdog_Control *the_watchdog, Per_CPU_Control *cpu, Watchdog_Interval ticks) - Sets the watchdog's cpu to the given instance and sets its expiration time to the watchdog expiration...
static __inline__ void _Watchdog_Per_CPU_remove_ticks(Watchdog_Control *the_watchdog) - Removes the watchdog from the cpu and the scheduled watchdogs.
#define RTEMS_ARRAY_SIZE(_array) - Returns the element count of the specified array.
Manages each user extension set.
int64_t _Timecounter_Sbinuptime(void) - Returns the uptime in the sbintime_t format.
Constants and Structures Related with the Thread Control Block.
volatile Watchdog_Interval _Watchdog_Ticks_since_boot - The watchdog ticks counter.
void _SMP_Send_message(uint32_t cpu_index, unsigned long message) - Sends an SMP message to a processor.
SuperCore SMP Implementation.