RTEMS
t-test-interrupt.c
/* SPDX-License-Identifier: BSD-2-Clause */

/*
 * Copyright (C) 2020 embedded brains GmbH (http://www.embedded-brains.de)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/test.h>

#include <rtems/score/atomic.h>
#include <rtems/score/percpu.h>
#include <rtems/score/thread.h>
#include <rtems/score/timecounter.h>
#include <rtems/score/timestampimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/score/watchdogimpl.h>

#ifdef RTEMS_SMP
#include <rtems/score/smpimpl.h>
#endif

typedef T_interrupt_test_state (*T_interrupt_test_handler)(void *);

#define T_INTERRUPT_SAMPLE_COUNT 8

typedef struct {
	uint_fast32_t one_tick_busy;
	int64_t t0;
	Thread_Control *self;
	Atomic_Uint state;
	void (*prepare)(void *);
	void (*action)(void *);
	T_interrupt_test_state (*interrupt)(void *);
	void (*blocked)(void *);
	void *arg;
#ifdef RTEMS_SMP
	Per_CPU_Job job;
	Per_CPU_Job_context job_context;
#endif
	Watchdog_Control wdg;
	User_extensions_Control ext;
	T_fixture_node node;
} T_interrupt_context;

typedef struct {
	int64_t t;
	int64_t d;
} T_interrupt_clock_time;

static void
T_interrupt_sort(T_interrupt_clock_time *ct, size_t n)
{
	size_t i;

	/* Bubble sort */
	for (i = 1; i < n; ++i) {
		size_t j;

		for (j = 0; j < n - i; ++j) {
			if (ct[j].d > ct[j + 1].d) {
				T_interrupt_clock_time tmp;

				tmp = ct[j];
				ct[j] = ct[j + 1];
				ct[j + 1] = tmp;
			}
		}
	}
}

static int64_t
T_interrupt_time_close_to_tick(void)
{
	Watchdog_Interval c0;
	Watchdog_Interval c1;
	T_interrupt_clock_time ct[12];
	sbintime_t t;
	int32_t ns_per_tick;
	size_t i;
	size_t n;

	ns_per_tick = (int32_t)_Watchdog_Nanoseconds_per_tick;
	n = RTEMS_ARRAY_SIZE(ct);
	c0 = _Watchdog_Ticks_since_boot;

	for (i = 0; i < n; ++i) {
		do {
			c1 = _Watchdog_Ticks_since_boot;
			t = _Timecounter_Sbinuptime();
		} while (c0 == c1);

		c0 = c1;
		ct[i].t = sbttons(t);
	}

	for (i = 1; i < n; ++i) {
		int64_t d;

		d = (ct[i].t - ct[1].t) % ns_per_tick;

		if (d > ns_per_tick / 2) {
			d -= ns_per_tick;
		}

		ct[i].d = d;
	}

	/*
	 * Use the median and not the arithmetic mean since on simulator
	 * platforms there may be outliers.
	 */
	T_interrupt_sort(&ct[1], n - 1);
	return ct[1 + (n - 1) / 2].t;
}
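
/*
 * Worked example (an added illustration, not part of the original file):
 * with a 10ms clock tick, ns_per_tick is 10000000.  A sample observed
 * 26000000ns after ct[1].t yields d = 26000000 % 10000000 = 6000000;
 * since this exceeds half a tick, it is folded to d = -4000000.  Each
 * delta is therefore the signed phase of one tick boundary observation
 * relative to ct[1].t, and returning the element with the median phase
 * picks a time point close to a tick boundary while ignoring outliers.
 */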

static void
T_interrupt_watchdog(Watchdog_Control *wdg)
{
	T_interrupt_context *ctx;
	ISR_Level level;
	T_interrupt_test_state state;
	unsigned int expected;

	ctx = RTEMS_CONTAINER_OF(wdg, T_interrupt_context, wdg);

	_ISR_Local_disable(level);
	_Watchdog_Per_CPU_insert_ticks(&ctx->wdg,
	    _Watchdog_Get_CPU(&ctx->wdg), 1);
	_ISR_Local_enable(level);

	state = (*ctx->interrupt)(ctx->arg);

	expected = T_INTERRUPT_TEST_ACTION;
	_Atomic_Compare_exchange_uint(&ctx->state, &expected,
	    state, ATOMIC_ORDER_RELAXED, ATOMIC_ORDER_RELAXED);
}

static void
T_interrupt_watchdog_insert(T_interrupt_context *ctx)
{
	ISR_Level level;

	_ISR_Local_disable(level);
	_Watchdog_Per_CPU_insert_ticks(&ctx->wdg, _Per_CPU_Get(), 1);
	_ISR_Local_enable(level);
}

static void
T_interrupt_watchdog_remove(T_interrupt_context *ctx)
{
	ISR_Level level;

	_ISR_Local_disable(level);
	_Watchdog_Per_CPU_remove_ticks(&ctx->wdg);
	_ISR_Local_enable(level);
}

static void
T_interrupt_init_once(T_interrupt_context *ctx)
{
	ctx->t0 = T_interrupt_time_close_to_tick();
	ctx->one_tick_busy = T_get_one_clock_tick_busy();
}

static T_interrupt_test_state
T_interrupt_continue(void *arg)
{
	(void)arg;
	return T_INTERRUPT_TEST_CONTINUE;
}

static void
T_interrupt_do_nothing(void *arg)
{
	(void)arg;
}

#ifdef RTEMS_SMP
static void
T_interrupt_blocked(void *arg)
{
	T_interrupt_context *ctx;

	ctx = arg;
	(*ctx->blocked)(ctx->arg);
}
#endif

static void T_interrupt_thread_switch(Thread_Control *, Thread_Control *);

static T_interrupt_context T_interrupt_instance = {
	.interrupt = T_interrupt_continue,
	.blocked = T_interrupt_do_nothing,
#ifdef RTEMS_SMP
	.job = {
		.context = &T_interrupt_instance.job_context
	},
	.job_context = {
		.handler = T_interrupt_blocked,
		.arg = &T_interrupt_instance
	},
#endif
	.wdg = WATCHDOG_INITIALIZER(T_interrupt_watchdog),
	.ext = {
		.Callouts = {
			.thread_switch = T_interrupt_thread_switch
		}
	}
};

T_interrupt_test_state
T_interrupt_test_change_state(T_interrupt_test_state expected_state,
    T_interrupt_test_state desired_state)
{
	T_interrupt_context *ctx;
	unsigned int expected;

	ctx = &T_interrupt_instance;
	expected = expected_state;
	_Atomic_Compare_exchange_uint(&ctx->state, &expected,
	    desired_state, ATOMIC_ORDER_RELAXED, ATOMIC_ORDER_RELAXED);

	return expected;
}
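
/*
 * Usage note (an added illustration, not part of the original file): a
 * test callback, for example a blocked handler, may use this function to
 * signal that the thread under test has really blocked:
 *
 *	T_interrupt_test_state state;
 *
 *	state = T_interrupt_test_change_state(T_INTERRUPT_TEST_ACTION,
 *	    T_INTERRUPT_TEST_BLOCKED);
 *
 * Since the exchange is a compare-and-swap, the returned previous state
 * tells the caller whether the transition took place; if the state was
 * already changed, for example by the interrupt handler, the returned
 * value differs from the expected state.
 */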

T_interrupt_test_state
T_interrupt_test_get_state(void)
{
	T_interrupt_context *ctx;

	ctx = &T_interrupt_instance;
	return _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);
}

void
T_interrupt_test_busy_wait_for_interrupt(void)
{
	T_interrupt_context *ctx;
	unsigned int state;

	ctx = &T_interrupt_instance;

	do {
		state = _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);
	} while (state == T_INTERRUPT_TEST_ACTION);
}

static void
T_interrupt_thread_switch(Thread_Control *executing, Thread_Control *heir)
{
	T_interrupt_context *ctx;

	(void)heir;
	ctx = &T_interrupt_instance;

	if (ctx->self == executing) {
		T_interrupt_test_state state;

		state = _Atomic_Load_uint(&ctx->state, ATOMIC_ORDER_RELAXED);

		if (state != T_INTERRUPT_TEST_INITIAL) {
#ifdef RTEMS_SMP
			Per_CPU_Control *cpu_self;

			/*
			 * In SMP configurations, the thread switch extension
			 * runs in a very restricted environment.  Interrupts
			 * are disabled and the caller owns the per-CPU lock.
			 * In order to avoid deadlocks at SMP lock level, we
			 * have to use an SMP job which runs later in the
			 * context of the inter-processor interrupt.
			 */
			cpu_self = _Per_CPU_Get();
			_Per_CPU_Add_job(cpu_self, &ctx->job);
			_SMP_Send_message(_Per_CPU_Get_index(cpu_self),
			    SMP_MESSAGE_PERFORM_JOBS);
#else
			(*ctx->blocked)(ctx->arg);
#endif
		}
	}
}

static T_interrupt_context *
T_interrupt_setup(const T_interrupt_test_config *config, void *arg)
{
	T_interrupt_context *ctx;

	T_quiet_assert_not_null(config->action);
	T_quiet_assert_not_null(config->interrupt);
	ctx = &T_interrupt_instance;
	ctx->self = _Thread_Get_executing();
	ctx->arg = arg;
	ctx->interrupt = config->interrupt;

	if (config->blocked != NULL) {
		ctx->blocked = config->blocked;
	}

	if (ctx->t0 == 0) {
		T_interrupt_init_once(ctx);
	}

	_User_extensions_Add_set(&ctx->ext);
	T_interrupt_watchdog_insert(ctx);
	return ctx;
}

static void
T_interrupt_teardown(void *arg)
{
	T_interrupt_context *ctx;

	ctx = arg;
	ctx->interrupt = T_interrupt_continue;
	ctx->blocked = T_interrupt_do_nothing;
	T_interrupt_watchdog_remove(ctx);
	_User_extensions_Remove_set(&ctx->ext);
	ctx->self = NULL;
	ctx->arg = NULL;
}

static const T_fixture T_interrupt_fixture = {
	.teardown = T_interrupt_teardown,
	.initial_context = &T_interrupt_instance
};

T_interrupt_test_state
T_interrupt_test(const T_interrupt_test_config *config, void *arg)
{
	T_interrupt_context *ctx;
	uint_fast32_t lower_bound[T_INTERRUPT_SAMPLE_COUNT];
	uint_fast32_t upper_bound[T_INTERRUPT_SAMPLE_COUNT];
	uint_fast32_t lower_sum;
	uint_fast32_t upper_sum;
	int32_t ns_per_tick;
	size_t sample;
	uint32_t iter;

	ctx = T_interrupt_setup(config, arg);
	T_push_fixture(&ctx->node, &T_interrupt_fixture);
	ns_per_tick = (int32_t)_Watchdog_Nanoseconds_per_tick;
	lower_sum = 0;
	upper_sum = T_INTERRUPT_SAMPLE_COUNT * ctx->one_tick_busy;

	for (sample = 0; sample < T_INTERRUPT_SAMPLE_COUNT; ++sample) {
		lower_bound[sample] = 0;
		upper_bound[sample] = ctx->one_tick_busy;
	}

	sample = 0;

	for (iter = 0; iter < config->max_iteration_count; ++iter) {
		T_interrupt_test_state state;
		int64_t t;
		int64_t d;
		sbintime_t s0;
		sbintime_t s1;
		uint_fast32_t busy;
		uint_fast32_t delta;

		if (config->prepare != NULL) {
			(*config->prepare)(arg);
		}

		/*
		 * We use a sort of damped bisection to find the right
		 * interrupt time point.
		 */
		busy = (lower_sum + upper_sum) /
		    (2 * T_INTERRUPT_SAMPLE_COUNT);

		t = sbttons(_Timecounter_Sbinuptime());
		d = (t - ctx->t0) % ns_per_tick;
		t += ns_per_tick / 4 - d;

		if (d > ns_per_tick / 8) {
			t += ns_per_tick;
		}

		/*
		 * The s1 value is a future time point close to 25% of a
		 * clock tick interval.
		 */
		s1 = nstosbt(t);

		/*
		 * The path from here to the action call must avoid anything
		 * which can cause jitter.  We wait using the timecounter
		 * until 25% of the clock tick interval has elapsed.  Then we
		 * do a busy wait and call the action.  The interrupt time
		 * point is controlled by the busy count.
		 */

		do {
			s0 = _Timecounter_Sbinuptime();
		} while (s0 < s1);

		_Atomic_Store_uint(&ctx->state, T_INTERRUPT_TEST_ACTION,
		    ATOMIC_ORDER_RELAXED);
		T_busy(busy);
		(*config->action)(arg);

		state = _Atomic_Exchange_uint(&ctx->state,
		    T_INTERRUPT_TEST_INITIAL, ATOMIC_ORDER_RELAXED);

		if (state == T_INTERRUPT_TEST_DONE) {
			break;
		}

		/* Adjust the lower/upper bound of the bisection interval */
		if (state == T_INTERRUPT_TEST_EARLY) {
			uint_fast32_t lower;

			upper_sum -= upper_bound[sample];
			upper_sum += busy;
			upper_bound[sample] = busy;

			/* Round down to make sure no underflow happens */
			lower = lower_bound[sample];
			delta = lower / 32;
			lower_sum -= delta;
			lower_bound[sample] = lower - delta;

			sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
		} else if (state == T_INTERRUPT_TEST_LATE) {
			uint_fast32_t upper;

			lower_sum -= lower_bound[sample];
			lower_sum += busy;
			lower_bound[sample] = busy;

			/*
			 * The one tick busy count value is not really
			 * trustworthy on some platforms.  Allow the upper
			 * bound to grow over this value in time.
			 */
			upper = upper_bound[sample];
			delta = (upper + 31) / 32;
			upper_sum += delta;
			upper_bound[sample] = upper + delta;

			sample = (sample + 1) % T_INTERRUPT_SAMPLE_COUNT;
		}
	}

	T_pop_fixture();

	if (iter == config->max_iteration_count) {
		return T_INTERRUPT_TEST_TIMEOUT;
	}

	return T_INTERRUPT_TEST_DONE;
}
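
/*
 * What follows is an illustrative sketch, not part of the original file.
 * It shows one way a test could drive T_interrupt_test().  All names
 * prefixed with example_ are hypothetical; the window marked by
 * example_phase stands in for the code path a real test wants to hit.
 * The interrupt handler reports EARLY or LATE so that the bisection
 * above can adjust the busy count until the interrupt lands inside the
 * window.
 */
static volatile int example_phase;

static void
example_prepare(void *arg)
{
	(void)arg;
	example_phase = 0;
}

static void
example_action(void *arg)
{
	(void)arg;
	example_phase = 1;	/* Enter the section which shall be hit */
	T_busy(1000);		/* Hypothetical work to be interrupted */
	example_phase = 2;	/* Leave the section again */
}

static T_interrupt_test_state
example_interrupt(void *arg)
{
	(void)arg;

	/* Tell the bisection how to adjust the busy count */
	if (example_phase == 0) {
		return T_INTERRUPT_TEST_EARLY;
	}

	if (example_phase == 2) {
		return T_INTERRUPT_TEST_LATE;
	}

	return T_INTERRUPT_TEST_DONE;
}

static const T_interrupt_test_config example_config = {
	.prepare = example_prepare,
	.action = example_action,
	.interrupt = example_interrupt,
	.max_iteration_count = 10000
};

T_TEST_CASE(InterruptTimingExample)
{
	T_interrupt_test_state state;

	state = T_interrupt_test(&example_config, NULL);
	T_eq_int(state, T_INTERRUPT_TEST_DONE);
}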