RTEMS  5.1
cpu.h
Go to the documentation of this file.
1 
7 /*
8  * This include file contains information pertaining to the ARM
9  * processor.
10  *
11  * Copyright (c) 2009, 2017 embedded brains GmbH
12  *
13  * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
14  *
15  * Copyright (c) 2006 OAR Corporation
16  *
17  * Copyright (c) 2002 Advent Networks, Inc.
18  * Jay Monkman <jmonkman@adventnetworks.com>
19  *
20  * COPYRIGHT (c) 2000 Canon Research Centre France SA.
21  * Emmanuel Raguet, mailto:raguet@crf.canon.fr
22  *
23  * The license and distribution terms for this file may be
24  * found in the file LICENSE in this distribution or at
25  * http://www.rtems.org/license/LICENSE.
26  *
27  */
28 
29 #ifndef _RTEMS_SCORE_CPU_H
30 #define _RTEMS_SCORE_CPU_H
31 
32 #include <rtems/score/basedefs.h>
33 #if defined(RTEMS_PARAVIRT)
34 #include <rtems/score/paravirt.h>
35 #endif
36 #include <rtems/score/arm.h>
37 
44 #if defined(ARM_MULTILIB_ARCH_V4)
45 
/*
 * On Thumb-1 the mrs/msr instructions used below are unavailable, so the
 * inline-assembly fragments temporarily switch to ARM state and back.
 * On ARM state or Thumb-2 builds the helpers expand to nothing.
 */
#if defined(__thumb__) && !defined(__thumb2__)
  /* Scratch register needed for the ARM<->Thumb state switch. */
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  /* Enter ARM state: align, branch-exchange to PC, then assemble as ARM. */
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  /* Return to Thumb state: PC + 1 sets the Thumb bit for the bx. */
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  /* Output operand binding for the scratch register. */
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
59 
/*
 * CPSR/SPSR bit definitions (see the ARM Architecture Reference Manual,
 * "Program Status Registers").
 *
 * Note: the condition-flag masks use unsigned literals; in particular
 * (1 << 31) would be undefined behavior (signed left shift into the sign
 * bit), so bit 31 is formed from 1U.
 */
#define ARM_PSR_N (1U << 31)             /* Negative condition flag */
#define ARM_PSR_Z (1U << 30)             /* Zero condition flag */
#define ARM_PSR_C (1U << 29)             /* Carry condition flag */
#define ARM_PSR_V (1U << 28)             /* Overflow condition flag */
#define ARM_PSR_Q (1U << 27)             /* Saturation flag */
#define ARM_PSR_J (1U << 24)             /* Jazelle state bit */
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xfU << ARM_PSR_GE_SHIFT) /* SIMD GE flags */
#define ARM_PSR_E (1U << 9)              /* Data endianness bit */
#define ARM_PSR_A (1U << 8)              /* Asynchronous abort mask */
#define ARM_PSR_I (1U << 7)              /* IRQ mask */
#define ARM_PSR_F (1U << 6)              /* FIQ mask */
#define ARM_PSR_T (1U << 5)              /* Thumb state bit */
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1fU << ARM_PSR_M_SHIFT) /* Processor mode field */
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f
88 
91 #endif /* defined(ARM_MULTILIB_ARCH_V4) */
92 
/*
 * The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

/* Interrupt handlers do not receive a frame pointer argument. */
#define CPU_ISR_PASSES_FRAME_POINTER FALSE

/*
 * The generic score floating-point context support is not used; VFP state,
 * when configured, is kept inside Context_Control (see ARM_MULTILIB_VFP).
 */
#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

/* ARM stacks grow from high addresses to low addresses. */
#define CPU_STACK_GROWS_UP FALSE

/* Cache line size selected by the multilib; drives structure alignment. */
#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/* Minimum task stack size: 4 KiB. */
#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

/* Interrupt stacks are aligned to a full cache line. */
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there's a CLZ function which could be
 * used to implement much quicker than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#define CPU_MAXIMUM_PROCESSORS 32
157 
/*
 * Byte offsets of optional Context_Control fields.  These are presumably
 * consumed by the assembly context-switch code and therefore must match
 * the Context_Control layout defined below (r4..lr plus
 * isr_dispatch_disable occupy the first 44 bytes on ARMv4).
 */
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  #define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44
#endif

/* Offset of the d8..d15 VFP register block. */
#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

/*
 * Offset of the is_executing flag; it depends on which optional fields
 * (VFP block, thread ID) precede it in Context_Control.
 */
#ifdef RTEMS_SMP
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #elif defined(ARM_MULTILIB_HAS_THREAD_ID_REGISTER)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 44
  #endif
#endif

/* Size and field offsets of CPU_Exception_frame (see struct below). */
#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

/* 2 * 4 bytes (fpexc, fpscr) + 32 * 8 bytes (d0..d31) = 264 bytes. */
#define ARM_VFP_CONTEXT_SIZE 264
187 
188 #ifndef ASM
189 
190 #ifdef __cplusplus
191 extern "C" {
192 #endif
193 
/**
 * @brief Thread register context saved across a context switch.
 *
 * The field layout must agree with the ARM_CONTEXT_CONTROL_* byte offsets
 * defined above, which the assembly context-switch code uses.
 *
 * NOTE(review): the closing "} Context_Control;" was lost in the
 * documentation extraction of this file and is restored here; the name is
 * confirmed by _CPU_Context_Get_SP() and the SMP accessors below.
 */
typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  /* Callee-saved integer registers per the AAPCS. */
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  /* Non-zero while thread dispatching is disabled for this thread. */
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
  /* Software thread ID register value. */
  uint32_t thread_id;
#endif
#ifdef ARM_MULTILIB_VFP
  /* Callee-saved VFP registers d8..d15. */
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  /* True while the owning thread executes on a processor. */
  volatile bool is_executing;
#endif
} Context_Control;
/**
 * @brief Data memory barrier (DMB) when barrier instructions are available.
 *
 * The fallback branch was elided in the documentation extraction; upstream
 * RTEMS uses RTEMS_COMPILER_MEMORY_BARRIER() there.  The empty asm with a
 * "memory" clobber below is the equivalent compiler-only barrier.
 */
static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  __asm__ volatile ( "" : : : "memory" );
#endif
}
248 
/**
 * @brief Data synchronization barrier (DSB) when barrier instructions are
 * available.
 *
 * Fallback restored as a compiler-only barrier (upstream uses
 * RTEMS_COMPILER_MEMORY_BARRIER(); the line was lost in extraction).
 */
static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  __asm__ volatile ( "" : : : "memory" );
#endif
}
257 
/**
 * @brief Instruction synchronization barrier (ISB) when barrier
 * instructions are available.
 *
 * Fallback restored as a compiler-only barrier (upstream uses
 * RTEMS_COMPILER_MEMORY_BARRIER(); the line was lost in extraction).
 */
static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  __asm__ volatile ( "" : : : "memory" );
#endif
}
266 
/*
 * Optionally provide the interrupt enable/disable primitives as
 * out-of-line functions instead of the inline versions below.
 */
#if defined(ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint32_t arm_interrupt_disable( void );
void arm_interrupt_enable( uint32_t level );
void arm_interrupt_flash( uint32_t level );
#else
/**
 * @brief Disables maskable interrupts and returns the previous level.
 *
 * On ARMv4 the returned level is the previous CPSR value; on ARMv7-M it is
 * the previous BASEPRI value.  Pass the returned value to
 * arm_interrupt_enable() to restore the previous state.
 */
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  /*
   * Disable only normal interrupts (IRQ).
   *
   * In order to support fast interrupts (FIQ) such that they can do something
   * useful, we have to disable the operating system support for FIQs. Having
   * operating system support for them would require that FIQs are disabled
   * during critical sections of the operating system and application. At this
   * level IRQs and FIQs would be equal. It is true that FIQs could interrupt
   * the non critical sections of IRQs, so here they would have a small
   * advantage. Without operating system support, the FIQs can execute at any
   * time (of course not during the service of another FIQ). If someone needs
   * operating system support for a FIQ, she can trigger a software interrupt and
   * service the request in a two-step process.
   */
  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"          /* read current CPSR */
    "orr %[arm_switch_reg], %[level], #0x80\n" /* set the I (IRQ mask) bit */
    "msr cpsr, %[arm_switch_reg]\n" /* write it back: IRQs now masked */
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  /* basepri_max only raises (never lowers) the masking priority. */
  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}
314 
/**
 * @brief Restores the interrupt level previously returned by
 * arm_interrupt_disable().
 *
 * On ARMv4 this writes the complete CPSR, so @a level must originate from
 * arm_interrupt_disable(); on ARMv7-M it restores BASEPRI.
 */
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}
335 
/**
 * @brief Momentarily restores the given interrupt level, then disables
 * interrupts again.
 *
 * Used inside long critical sections to give pending interrupts a window
 * to be serviced.
 */
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n" /* save the disabled state */
    "msr cpsr, %[level]\n"          /* open the interrupt window */
    "msr cpsr, %[arm_switch_reg]\n" /* close it again */
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
#endif /* !ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE */
363 
/* Saves the current interrupt level in _isr_cookie and masks interrupts. */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

/* Restores the interrupt level saved by _CPU_ISR_Disable(). */
#define _CPU_ISR_Enable( _isr_cookie ) \
  arm_interrupt_enable( _isr_cookie )

/* Briefly re-enables interrupts at the saved level, then masks again. */
#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )
374 
/**
 * @brief Returns true if interrupts are enabled in the specified ISR level,
 * otherwise returns false.
 *
 * NOTE(review): the signature line was lost in the documentation extraction
 * and is restored here (confirmed by the generated cross-reference for this
 * file).  On ARMv4 the level is a CPSR image, so the I bit (0x80) clear
 * means enabled; on ARMv7-M a BASEPRI of zero means enabled.
 */
static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}
383 
/* Sets the hardware interrupt level (implemented in the port's cpu.c). */
void _CPU_ISR_Set_level( uint32_t level );

/* Returns the current hardware interrupt level. */
uint32_t _CPU_ISR_Get_level( void );
387 
389  Context_Control *the_context,
390  void *stack_area_begin,
391  size_t stack_area_size,
392  uint32_t new_level,
393  void (*entry_point)( void ),
394  bool is_fp,
395  void *tls_area
396 );
397 
/* Returns the stack pointer stored in the given thread context. */
#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp
400 
#ifdef RTEMS_SMP
  /* Returns the is_executing flag of the specified context. */
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  /*
   * Sets the is_executing flag of the specified context.
   *
   * NOTE(review): the "Context_Control *context," parameter line was lost
   * in the documentation extraction and is restored here; the body
   * references "context" and the getter above fixes the type.
   */
  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }
#endif
417 
/* Restarts the calling thread by restoring its context. */
#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

/* Initializes an FP context area (generic score FP support is unused here). */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/*
 * Halts the system: disables interrupts, places the error code in r0 so it
 * is visible to a debugger, then spins forever.
 */
#define _CPU_Fatal_halt( _source, _err ) \
  do { \
    uint32_t _level; \
    uint32_t _error = _err; \
    _CPU_ISR_Disable( _level ); \
    (void) _level; \
    __asm__ volatile ("mov r0, %0\n" \
    : "=r" (_error) \
    : "0" (_error) \
    : "r0" ); \
    while (1); \
  } while (0);
438 
/**
 * @brief CPU initialization (implemented in the port's cpu.c).
 */
void _CPU_Initialize( void );

/* Type of a raw interrupt service routine entry. */
typedef void ( *CPU_ISR_handler )( void );

/*
 * Installs an interrupt vector.
 *
 * NOTE(review): the opening declarator line was lost in the documentation
 * extraction and is restored here based on the parameter list that
 * survived and the generated cross-reference.
 */
void _CPU_ISR_install_vector(
  uint32_t vector,
  CPU_ISR_handler new_handler,
  CPU_ISR_handler *old_handler
);
451 
456 
457 void _CPU_Context_restore( Context_Control *new_context )
459 
460 #if defined(ARM_MULTILIB_ARCH_V7M)
461  void _ARMV7M_Start_multitasking( Context_Control *heir )
463  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
464 #endif
465 
#ifdef RTEMS_SMP
  /* SMP start-up hooks implemented by the port / BSP. */
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  /* Returns the index of the current processor. */
  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    /* The low byte (affinity level 0) serves as the processor index. */
    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  /* SEV: signal an event to processors waiting in WFE. */
  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  /* WFE: wait for an event. */
  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }

  /* Ensure prior stores are visible, then wake waiting processors. */
  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    _ARM_Data_synchronization_barrier();
    _ARM_Send_event();
  }

  /* Wait for an event, then order subsequent memory accesses. */
  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    _ARM_Wait_for_event();
    _ARM_Data_memory_barrier();
  }
#endif
512 
513 
/**
 * @brief Byte-swaps the 32-bit value (endianness conversion).
 */
static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  /* Thumb-2 provides a single-instruction byte reverse. */
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  /* Thumb-1 lacks rev and the ARM idiom below, so swap with plain C. */
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8) & 0xff;
  byte1 = value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  /* Classic ARM byte-swap sequence using rotates. */
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                    "BIC %1, %1, #0xff0000\n"
                    "MOV %0, %0, ROR #8\n"
                    "EOR %0, %0, %1, LSR #8\n"
                    : "=r" (value), "=r" (tmp)
                    : "0" (value), "1" (tmp));
  return value;
#endif
}
544 
/**
 * @brief Byte-swaps the 16-bit value (endianness conversion).
 */
static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  /* Portable fallback: exchange the two bytes with shifts and masks. */
  uint16_t low_byte = (uint16_t) ( value & 0xffU );
  uint16_t high_byte = (uint16_t) ( ( value >> 8 ) & 0xffU );

  return (uint16_t) ( ( low_byte << 8 ) | high_byte );
#endif
}
558 
/* Unsigned integer type for free-running CPU counter values. */
typedef uint32_t CPU_Counter_ticks;

/* Returns the CPU counter frequency in Hz (provided by the BSP). */
uint32_t _CPU_Counter_frequency( void );

/* Returns the current CPU counter value (provided by the BSP). */
CPU_Counter_ticks _CPU_Counter_read( void );
564 
565 static inline CPU_Counter_ticks _CPU_Counter_difference(
566  CPU_Counter_ticks second,
567  CPU_Counter_ticks first
568 )
569 {
570  return second - first;
571 }
572 
/* Idle thread body. */
void *_CPU_Thread_Idle_body( uintptr_t ignored );

#if defined(ARM_MULTILIB_ARCH_V4)

/*
 * Symbolic names for the ARM exception vectors, in vector-table order.
 * The sentinel forces the enum to be 32 bits wide.
 */
typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0xffffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */
591 
/**
 * @brief Complete VFP register state (fpexc, fpscr and d0..d31).
 *
 * The total size (2 * 4 + 32 * 8 = 264 bytes) must match
 * ARM_VFP_CONTEXT_SIZE defined above.
 *
 * NOTE(review): the closing "} ARM_VFP_context;" was lost in the
 * documentation extraction and is restored here; the name is confirmed by
 * the vfp_context member of CPU_Exception_frame below.
 */
typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;
629 typedef struct {
630  uint32_t register_r0;
631  uint32_t register_r1;
632  uint32_t register_r2;
633  uint32_t register_r3;
634  uint32_t register_r4;
635  uint32_t register_r5;
636  uint32_t register_r6;
637  uint32_t register_r7;
638  uint32_t register_r8;
639  uint32_t register_r9;
640  uint32_t register_r10;
641  uint32_t register_r11;
642  uint32_t register_r12;
643  uint32_t register_sp;
644  void *register_lr;
645  void *register_pc;
646 #if defined(ARM_MULTILIB_ARCH_V4)
647  uint32_t register_cpsr;
648  Arm_symbolic_exception_name vector;
649 #elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
650  uint32_t register_xpsr;
651  uint32_t vector;
652 #endif
653  const ARM_VFP_context *vfp_context;
654  uint32_t reserved_for_stack_alignment;
656 
658 
659 void _ARM_Exception_default( CPU_Exception_frame *frame );
660 
662 typedef uintptr_t CPU_Uint32ptr;
663 
664 #ifdef __cplusplus
665 }
666 #endif
667 
668 #endif /* ASM */
669 
672 #endif /* _RTEMS_SCORE_CPU_H */
void _CPU_Exception_frame_print(const CPU_Exception_frame *frame)
Prints the exception frame via printk().
Definition: vectorexceptions.c:45
void _CPU_ISR_Set_level(uint32_t level)
Sets the hardware interrupt level by the level value.
Definition: cpu.c:57
CPU_Counter_ticks _CPU_Counter_read(void)
Returns the current CPU counter value.
Definition: system-clocks.c:117
Thread register context.
Definition: cpu.h:194
void * _CPU_Thread_Idle_body(uintptr_t ignored)
Definition: idle-mcf5272.c:20
#define RTEMS_NO_RETURN
Definition: basedefs.h:102
void _CPU_Context_Initialize(Context_Control *context, void *stack_area_begin, size_t stack_area_size, uint32_t new_level, void(*entry_point)(void), bool is_fp, void *tls_area)
Initializes the CPU context.
Definition: epiphany-context-initialize.c:40
void _CPU_Context_switch(Context_Control *run, Context_Control *heir)
CPU switch context.
Definition: cpu_asm.c:91
#define RTEMS_COMPILER_MEMORY_BARRIER()
Definition: basedefs.h:77
void _CPU_Initialize(void)
CPU initialization.
Definition: cpu.c:45
#define CPU_swap_u16(value)
Definition: cpu.h:642
register struct Per_CPU_Control *_SPARC_Per_CPU_current __asm__("g6")
The pointer to the current per-CPU control is available via register g6.
uint32_t CPU_Counter_ticks
Unsigned integer type for CPU counter values.
Definition: cpu.h:1210
uint32_t _CPU_ISR_Get_level(void)
Definition: cpu.c:88
unsigned context
Definition: tlb.h:108
bool _CPU_ISR_Is_enabled(uint32_t level)
Returns true if interrupts are enabled in the specified ISR level, otherwise returns false.
Definition: cpu.h:375
void _CPU_Context_restore(Context_Control *new_context) RTEMS_NO_RETURN
Definition: cpu_asm.c:111
uintptr_t CPU_Uint32ptr
Definition: cpu.h:662
uint32_t _CPU_Counter_frequency(void)
Returns the current CPU counter frequency in Hz.
Definition: system-clocks.c:112
Basic Definitions.
RTEMS_INLINE_ROUTINE void _CPU_ISR_install_vector(uint32_t vector, CPU_ISR_handler new_handler, CPU_ISR_handler *old_handler)
SPARC specific RTEMS ISR installer.
Definition: cpu.h:493
The set of registers that specifies the complete processor state.
Definition: cpu.h:629
Definition: cpu.h:592
#define RTEMS_INLINE_ROUTINE
Definition: basedefs.h:66
ARM Assembler Support API.