RTEMS 6.1-rc1
cpu.h
/* SPDX-License-Identifier: BSD-2-Clause */

/*
 * This include file contains information pertaining to the ARM
 * processor.
 *
 * Copyright (C) 2009, 2017 embedded brains GmbH & Co. KG
 *
 * Copyright (c) 2007 Ray Xu <Rayx.cn@gmail.com>
 *
 * Copyright (c) 2006 On-Line Applications Research Corporation (OAR)
 *
 * Copyright (c) 2002 Advent Networks, Inc.
 *      Jay Monkman <jmonkman@adventnetworks.com>
 *
 * COPYRIGHT (c) 2000 Canon Research Centre France SA.
 * Emmanuel Raguet, mailto:raguet@crf.canon.fr
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#if defined(RTEMS_PARAVIRT)
#include <rtems/score/paravirt.h>
#endif
#include <rtems/score/arm.h>

#if defined(ARM_MULTILIB_ARCH_V4)

#if defined(__thumb__) && !defined(__thumb2__)
  #define ARM_SWITCH_REGISTERS uint32_t arm_switch_reg
  #define ARM_SWITCH_TO_ARM ".align 2\nbx pc\n.arm\n"
  #define ARM_SWITCH_BACK "add %[arm_switch_reg], pc, #1\nbx %[arm_switch_reg]\n.thumb\n"
  #define ARM_SWITCH_OUTPUT [arm_switch_reg] "=&r" (arm_switch_reg)
  #define ARM_SWITCH_ADDITIONAL_OUTPUT , ARM_SWITCH_OUTPUT
#else
  #define ARM_SWITCH_REGISTERS
  #define ARM_SWITCH_TO_ARM
  #define ARM_SWITCH_BACK
  #define ARM_SWITCH_OUTPUT
  #define ARM_SWITCH_ADDITIONAL_OUTPUT
#endif
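
/*
 * Usage sketch (not part of the original header): the switch macros above
 * let Thumb-1 code temporarily enter ARM state for instructions such as
 * "mrs" that Thumb-1 lacks, exactly as arm_interrupt_disable() and friends
 * do further below. The helper name example_read_cpsr is hypothetical.
 *
 *   static inline uint32_t example_read_cpsr( void )
 *   {
 *     uint32_t cpsr;
 *     ARM_SWITCH_REGISTERS;
 *
 *     __asm__ volatile (
 *       ARM_SWITCH_TO_ARM
 *       "mrs %[cpsr], cpsr\n"
 *       ARM_SWITCH_BACK
 *       : [cpsr] "=&r" (cpsr) ARM_SWITCH_ADDITIONAL_OUTPUT
 *     );
 *
 *     return cpsr;
 *   }
 */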

#define ARM_PSR_N (1 << 31)
#define ARM_PSR_Z (1 << 30)
#define ARM_PSR_C (1 << 29)
#define ARM_PSR_V (1 << 28)
#define ARM_PSR_Q (1 << 27)
#define ARM_PSR_J (1 << 24)
#define ARM_PSR_GE_SHIFT 16
#define ARM_PSR_GE_MASK (0xf << ARM_PSR_GE_SHIFT)
#define ARM_PSR_E (1 << 9)
#define ARM_PSR_A (1 << 8)
#define ARM_PSR_I (1 << 7)
#define ARM_PSR_F (1 << 6)
#define ARM_PSR_T (1 << 5)
#define ARM_PSR_M_SHIFT 0
#define ARM_PSR_M_MASK (0x1f << ARM_PSR_M_SHIFT)
#define ARM_PSR_M_USR 0x10
#define ARM_PSR_M_FIQ 0x11
#define ARM_PSR_M_IRQ 0x12
#define ARM_PSR_M_SVC 0x13
#define ARM_PSR_M_ABT 0x17
#define ARM_PSR_M_HYP 0x1a
#define ARM_PSR_M_UND 0x1b
#define ARM_PSR_M_SYS 0x1f

#endif /* defined(ARM_MULTILIB_ARCH_V4) */
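
/*
 * Usage sketch (not part of the original header): decoding the mode field
 * and the IRQ mask bit of a saved program status register value, for
 * example the register_cpsr member of a CPU_Exception_frame. The helper
 * name is hypothetical.
 *
 *   static inline bool example_psr_is_svc_with_irq_enabled( uint32_t psr )
 *   {
 *     uint32_t mode = ( psr & ARM_PSR_M_MASK ) >> ARM_PSR_M_SHIFT;
 *
 *     return mode == ARM_PSR_M_SVC && ( psr & ARM_PSR_I ) == 0;
 *   }
 */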

/*
 * The ARM uses the PIC interrupt model.
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

#define CPU_HARDWARE_FP FALSE

#define CPU_SOFTWARE_FP FALSE

#define CPU_ALL_TASKS_ARE_FP FALSE

#define CPU_IDLE_TASK_IS_FP FALSE

#define CPU_USE_DEFERRED_FP_SWITCH FALSE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH TRUE

#define CPU_STACK_GROWS_UP FALSE

#if defined(ARM_MULTILIB_CACHE_LINE_MAX_64)
  #define CPU_CACHE_LINE_BYTES 64
#else
  #define CPU_CACHE_LINE_BYTES 32
#endif

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES )
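
/*
 * Usage sketch (not part of the original header): CPU_STRUCTURE_ALIGNMENT
 * aligns an object to CPU_CACHE_LINE_BYTES so that frequently written
 * members do not share a cache line with unrelated data. The type and
 * names below are hypothetical.
 *
 *   typedef struct {
 *     uint32_t head;
 *     uint32_t tail;
 *   } example_queue_control;
 *
 *   static example_queue_control example_queue CPU_STRUCTURE_ALIGNMENT;
 */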

#define CPU_MODES_INTERRUPT_MASK 0x1

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

#define CPU_STACK_MINIMUM_SIZE (1024 * 4)

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_SIZEOF_POINTER 4

/* AAPCS, section 4.1, Fundamental Data Types */
#define CPU_ALIGNMENT 8

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/* AAPCS, section 5.2.1.2, Stack constraints at a public interface */
#define CPU_STACK_ALIGNMENT 8

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

/*
 * Bitfield handler macros.
 *
 * If we had a particularly fast function for finding the first
 * bit set in a word, it would go here. Since we don't (*), we'll
 * just use the universal macros.
 *
 * (*) On ARM V5 and later, there is a CLZ instruction which could be
 *     used to implement this much more quickly than the default macro.
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
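
/*
 * Illustrative sketch (not part of the original header) of the CLZ-based
 * helper hinted at in the comment above: on ARMv5 and later, GCC's
 * __builtin_clz() maps to the CLZ instruction, so the most significant set
 * bit of a 16-bit value can be found in a couple of instructions. The exact
 * bit-numbering convention of the generic bitfield macros is not reproduced
 * here.
 *
 *   static inline unsigned int example_find_first_set_msb( uint16_t value )
 *   {
 *     // Index of the most significant set bit, counted from the MSB
 *     // (0 = bit 15). The caller must ensure value != 0, because
 *     // __builtin_clz() is undefined for 0.
 *     return (unsigned int) __builtin_clz( (uint32_t) value << 16 );
 *   }
 */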

#define CPU_USE_LIBC_INIT_FINI_ARRAY TRUE

#define CPU_MAXIMUM_PROCESSORS 32

#define ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET 44

#ifdef ARM_MULTILIB_VFP
  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
#endif

#ifdef ARM_MULTILIB_ARCH_V4
  #define ARM_CONTEXT_CONTROL_ISR_DISPATCH_DISABLE 40
#endif

#ifdef RTEMS_SMP
  #if defined(ARM_MULTILIB_VFP)
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
  #else
    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
  #endif
#endif

#define ARM_EXCEPTION_FRAME_SIZE 80

#define ARM_EXCEPTION_FRAME_REGISTER_SP_OFFSET 52

#define ARM_EXCEPTION_FRAME_VFP_CONTEXT_OFFSET 72

#define ARM_VFP_CONTEXT_SIZE 264

#ifndef ASM

#ifdef __cplusplus
extern "C" {
#endif

typedef struct {
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_fp;
  uint32_t register_sp;
  uint32_t register_lr;
  uint32_t isr_dispatch_disable;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  void *register_lr;
  void *register_sp;
  uint32_t isr_nest_level;
#else
  void *register_sp;
#endif
  uint32_t thread_id;
#ifdef ARM_MULTILIB_VFP
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
#endif
#ifdef RTEMS_SMP
  volatile bool is_executing;
#endif
} Context_Control;

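
/*
 * Consistency sketch (not part of the original header): the
 * ARM_CONTEXT_CONTROL_*_OFFSET constants above mirror byte offsets of
 * Context_Control members so that assembly code can use them. A
 * compile-time check along these lines keeps both views in sync; it assumes
 * <stddef.h> for offsetof() and a C11 toolchain for _Static_assert().
 *
 *   #include <stddef.h>
 *
 *   _Static_assert(
 *     offsetof( Context_Control, thread_id )
 *       == ARM_CONTEXT_CONTROL_THREAD_ID_OFFSET,
 *     "thread_id offset must match the assembly-level constant"
 *   );
 */
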
static inline void _ARM_Data_memory_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dmb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Data_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "dsb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}

static inline void _ARM_Instruction_synchronization_barrier( void )
{
#ifdef ARM_MULTILIB_HAS_BARRIER_INSTRUCTIONS
  __asm__ volatile ( "isb" : : : "memory" );
#else
  RTEMS_COMPILER_MEMORY_BARRIER();
#endif
}
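
/*
 * Usage sketch (not part of the original header): a producer-side use of
 * the data memory barrier, ensuring the payload store is observable before
 * the flag store that announces it. The names are hypothetical.
 *
 *   static inline void example_publish(
 *     volatile uint32_t *example_payload,
 *     volatile uint32_t *example_ready
 *   )
 *   {
 *     *example_payload = 123;
 *     _ARM_Data_memory_barrier();  // order the payload store before the flag
 *     *example_ready = 1;
 *   }
 */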

#if defined(ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE)
uint32_t arm_interrupt_disable( void );
void arm_interrupt_enable( uint32_t level );
void arm_interrupt_flash( uint32_t level );
#else
static inline uint32_t arm_interrupt_disable( void )
{
  uint32_t level;

#if defined(ARM_MULTILIB_ARCH_V4)
  /*
   * Disable only normal interrupts (IRQ).
   *
   * In order to support fast interrupts (FIQ) such that they can do something
   * useful, we have to disable the operating system support for FIQs. Having
   * operating system support for them would require that FIQs are disabled
   * during critical sections of the operating system and application. At this
   * level IRQs and FIQs would be equal. It is true that FIQs could interrupt
   * the non critical sections of IRQs, so here they would have a small
   * advantage. Without operating system support, the FIQs can execute at any
   * time (of course not during the service of another FIQ). If someone needs
   * operating system support for a FIQ, she can trigger a software interrupt and
   * service the request in a two-step process.
   */
#if __ARM_ARCH >= 7
  __asm__ volatile (
    "mrs %0, cpsr\n"
    "cpsid i\n"
    "isb"
    : "=&r" (level)
  );
#else
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[level], cpsr\n"
    "orr %[arm_switch_reg], %[level], #0x80\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg), [level] "=&r" (level)
  );
#endif
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri = 0x80;

  __asm__ volatile (
    "mrs %[level], basepri\n"
    "msr basepri_max, %[basepri]\n"
    : [level] "=&r" (level)
    : [basepri] "r" (basepri)
  );
#endif

  return level;
}

static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  ARM_SWITCH_REGISTERS;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "msr cpsr, %[level]\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  __asm__ volatile (
    "msr basepri, %[level]\n"
    :
    : [level] "r" (level)
  );
#endif
}

static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t arm_switch_reg;

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mrs %[arm_switch_reg], cpsr\n"
    "msr cpsr, %[level]\n"
    "msr cpsr, %[arm_switch_reg]\n"
    ARM_SWITCH_BACK
    : [arm_switch_reg] "=&r" (arm_switch_reg)
    : [level] "r" (level)
  );
#elif defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t basepri;

  __asm__ volatile (
    "mrs %[basepri], basepri\n"
    "msr basepri, %[level]\n"
    "msr basepri, %[basepri]\n"
    : [basepri] "=&r" (basepri)
    : [level] "r" (level)
  );
#endif
}
#endif /* !ARM_DISABLE_INLINE_ISR_DISABLE_ENABLE */

#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    _isr_cookie = arm_interrupt_disable(); \
  } while (0)

#define _CPU_ISR_Enable( _isr_cookie ) \
  arm_interrupt_enable( _isr_cookie )

#define _CPU_ISR_Flash( _isr_cookie ) \
  arm_interrupt_flash( _isr_cookie )

static inline bool _CPU_ISR_Is_enabled( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
  return ( level & 0x80 ) == 0;
#elif defined(ARM_MULTILIB_ARCH_V7M)
  return level == 0;
#endif
}
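
/*
 * Usage sketch (not part of the original header): the canonical critical
 * section built from the macros above. The cookie returned by
 * _CPU_ISR_Disable() is passed back unchanged to _CPU_ISR_Enable(), so
 * nesting works as long as each level keeps its own cookie. The helper
 * name is hypothetical.
 *
 *   static inline void example_critical_increment( volatile uint32_t *counter )
 *   {
 *     uint32_t level;
 *
 *     _CPU_ISR_Disable( level );
 *     ++*counter;  // protected from interrupts on the current processor
 *     _CPU_ISR_Enable( level );
 *   }
 */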

void _CPU_ISR_Set_level( uint32_t level );

uint32_t _CPU_ISR_Get_level( void );

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Get_SP( _context ) \
  (_context)->register_sp

#ifdef RTEMS_SMP
  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }

  RTEMS_NO_RETURN void _ARM_Start_multitasking( Context_Control *heir );

  #define _CPU_Start_multitasking( _heir ) _ARM_Start_multitasking( _heir )
#endif

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

void _CPU_Initialize( void );

typedef void ( *CPU_ISR_handler )( void );

void _CPU_ISR_install_vector(
  uint32_t vector,
  CPU_ISR_handler new_handler,
  CPU_ISR_handler *old_handler
);

void _CPU_Context_switch( Context_Control *run, Context_Control *heir );

RTEMS_NO_RETURN void _CPU_Context_switch_no_return(
  Context_Control *executing,
  Context_Control *heir
);

RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );

#if defined(ARM_MULTILIB_ARCH_V7M)
  RTEMS_NO_RETURN void _ARMV7M_Start_multitasking( Context_Control *heir );
  #define _CPU_Start_multitasking _ARMV7M_Start_multitasking
#endif

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    uint32_t mpidr;

    /* Use ARMv7 Multiprocessor Affinity Register (MPIDR) */
    __asm__ volatile (
      "mrc p15, 0, %[mpidr], c0, c0, 5\n"
      : [mpidr] "=&r" (mpidr)
    );

    return mpidr & 0xffU;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _ARM_Send_event( void )
  {
    __asm__ volatile ( "sev" : : : "memory" );
  }

  static inline void _ARM_Wait_for_event( void )
  {
    __asm__ volatile ( "wfe" : : : "memory" );
  }
#endif
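
/*
 * Usage sketch (not part of the original header): a minimal event-based
 * spin loop built on the SMP-only helpers above. The waiter parks in WFE
 * until the releasing processor publishes the flag and issues SEV; WFE may
 * also wake spuriously, hence the loop. The helper names are hypothetical.
 *
 *   static inline void example_wait_for_flag( volatile uint32_t *flag )
 *   {
 *     while ( *flag == 0 ) {
 *       _ARM_Wait_for_event();
 *     }
 *   }
 *
 *   static inline void example_set_flag( volatile uint32_t *flag )
 *   {
 *     *flag = 1;
 *     _ARM_Data_synchronization_barrier();  // complete the store before SEV
 *     _ARM_Send_event();
 *   }
 */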

static inline uint32_t CPU_swap_u32( uint32_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#elif defined(__thumb__)
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8) & 0xff;
  byte1 = value & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
#else
  uint32_t tmp = value; /* make compiler warnings go away */
  __asm__ volatile ("EOR %1, %0, %0, ROR #16\n"
                    "BIC %1, %1, #0xff0000\n"
                    "MOV %0, %0, ROR #8\n"
                    "EOR %0, %0, %1, LSR #8\n"
                    : "=r" (value), "=r" (tmp)
                    : "0" (value), "1" (tmp));
  return value;
#endif
}

static inline uint16_t CPU_swap_u16( uint16_t value )
{
#if defined(__thumb2__)
  __asm__ volatile (
    "rev16 %0, %0"
    : "=r" (value)
    : "0" (value)
  );
  return value;
#else
  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
#endif
}
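
/*
 * Usage sketch (not part of the original header): on a little-endian ARM
 * configuration, CPU_swap_u32() converts a big-endian 32-bit word, for
 * example from a network header, to host byte order. The helper name is
 * hypothetical.
 *
 *   static inline uint32_t example_be32_to_host( uint32_t big_endian_word )
 *   {
 *     return CPU_swap_u32( big_endian_word );
 *   }
 */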

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );
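
/*
 * Usage sketch (not part of the original header): busy-waiting for a number
 * of microseconds with the free-running CPU counter. The subtraction is
 * done in the unsigned CPU_Counter_ticks type, so a counter wrap-around
 * during the wait is handled, provided the interval fits into the type.
 * The helper name is hypothetical.
 *
 *   static inline void example_busy_wait_us( uint32_t microseconds )
 *   {
 *     CPU_Counter_ticks start = _CPU_Counter_read();
 *     CPU_Counter_ticks interval = (CPU_Counter_ticks)
 *       ( ( (uint64_t) _CPU_Counter_frequency() * microseconds ) / 1000000 );
 *
 *     while ( (CPU_Counter_ticks) ( _CPU_Counter_read() - start ) < interval ) {
 *       // spin
 *     }
 *   }
 */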

void *_CPU_Thread_Idle_body( uintptr_t ignored );

#if defined(ARM_MULTILIB_ARCH_V4)

typedef enum {
  ARM_EXCEPTION_RESET = 0,
  ARM_EXCEPTION_UNDEF = 1,
  ARM_EXCEPTION_SWI = 2,
  ARM_EXCEPTION_PREF_ABORT = 3,
  ARM_EXCEPTION_DATA_ABORT = 4,
  ARM_EXCEPTION_RESERVED = 5,
  ARM_EXCEPTION_IRQ = 6,
  ARM_EXCEPTION_FIQ = 7,
  MAX_EXCEPTIONS = 8,
  ARM_EXCEPTION_MAKE_ENUM_32_BIT = 0x7fffffff
} Arm_symbolic_exception_name;

#endif /* defined(ARM_MULTILIB_ARCH_V4) */

typedef struct {
  uint32_t register_fpexc;
  uint32_t register_fpscr;
  uint64_t register_d0;
  uint64_t register_d1;
  uint64_t register_d2;
  uint64_t register_d3;
  uint64_t register_d4;
  uint64_t register_d5;
  uint64_t register_d6;
  uint64_t register_d7;
  uint64_t register_d8;
  uint64_t register_d9;
  uint64_t register_d10;
  uint64_t register_d11;
  uint64_t register_d12;
  uint64_t register_d13;
  uint64_t register_d14;
  uint64_t register_d15;
  uint64_t register_d16;
  uint64_t register_d17;
  uint64_t register_d18;
  uint64_t register_d19;
  uint64_t register_d20;
  uint64_t register_d21;
  uint64_t register_d22;
  uint64_t register_d23;
  uint64_t register_d24;
  uint64_t register_d25;
  uint64_t register_d26;
  uint64_t register_d27;
  uint64_t register_d28;
  uint64_t register_d29;
  uint64_t register_d30;
  uint64_t register_d31;
} ARM_VFP_context;

typedef struct {
  uint32_t register_r0;
  uint32_t register_r1;
  uint32_t register_r2;
  uint32_t register_r3;
  uint32_t register_r4;
  uint32_t register_r5;
  uint32_t register_r6;
  uint32_t register_r7;
  uint32_t register_r8;
  uint32_t register_r9;
  uint32_t register_r10;
  uint32_t register_r11;
  uint32_t register_r12;
  uint32_t register_sp;
  void *register_lr;
  void *register_pc;
#if defined(ARM_MULTILIB_ARCH_V4)
  uint32_t register_cpsr;
  Arm_symbolic_exception_name vector;
#elif defined(ARM_MULTILIB_ARCH_V6M) || defined(ARM_MULTILIB_ARCH_V7M)
  uint32_t register_xpsr;
  uint32_t vector;
#endif
  const ARM_VFP_context *vfp_context;
  uint32_t reserved_for_stack_alignment;
} CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

void _ARM_Exception_default( CPU_Exception_frame *frame );

typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */