RTEMS  5.1
cpu.h
/*
 * COPYRIGHT (c) 1989-2006. On-Line Applications Research Corporation (OAR).
 *
 * This file is based on the SPARC cpu.h file. Modifications are made
 * to support the SPARC64 processor.
 * COPYRIGHT (c) 2010. Gedare Bloom.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/sparc64.h>

/* conditional compilation parameters */

/*
 * Does the CPU follow the simple vectored interrupt model?
 *
 * If TRUE, then RTEMS allocates and internally manages the vector table.
 * If FALSE, then the BSP is assumed to allocate and manage the vector
 * table.
 *
 * SPARC Specific Information:
 *
 * XXX document implementation including references if appropriate
 */
#define CPU_SIMPLE_VECTORED_INTERRUPTS TRUE

/*
 * Does RTEMS invoke the user's ISR with the vector number and
 * a pointer to the saved interrupt frame (1) or just the vector
 * number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER FALSE

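/*
 * Illustrative sketch (not part of the original header): because the
 * setting above is FALSE, a user ISR installed through the executive
 * receives only the vector number, matching the CPU_ISR_handler type
 * declared later in this file.  The handler name is hypothetical.
 *
 *   void my_isr( uint32_t vector )
 *   {
 *     ... handle the trap identified by "vector" ...
 *   }
 */
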
/*
 * Does the CPU have hardware floating point?
 *
 * If TRUE, then the FLOATING_POINT task attribute is supported.
 * If FALSE, then the FLOATING_POINT task attribute is ignored.
 */

#if ( SPARC_HAS_FPU == 1 )
#define CPU_HARDWARE_FP TRUE
#else
#define CPU_HARDWARE_FP FALSE
#endif
#define CPU_SOFTWARE_FP FALSE

/*
 * Are all tasks implicitly FLOATING_POINT tasks?
 *
 * If TRUE, then the FLOATING_POINT task attribute is assumed.
 * If FALSE, then the FLOATING_POINT task attribute is followed.
 */

#define CPU_ALL_TASKS_ARE_FP FALSE

/*
 * Should the IDLE task have a floating point context?
 *
 * If TRUE, then the IDLE task is created as a FLOATING_POINT task
 * and it has a floating point context which is switched in and out.
 * If FALSE, then the IDLE task does not have a floating point context.
 */

#define CPU_IDLE_TASK_IS_FP FALSE

/*
 * Should the saving of the floating point registers be deferred
 * until a context switch is made to another, different floating point
 * task?
 *
 * If TRUE, then the floating point context will not be stored until
 * necessary. It will remain in the floating point registers and not be
 * disturbed until another floating point task is switched to.
 *
 * If FALSE, then the floating point context is saved when a floating
 * point task is switched out and restored when the next floating point
 * task is restored. The state of the floating point registers between
 * those two operations is not specified.
 */

#define CPU_USE_DEFERRED_FP_SWITCH TRUE

#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE

/*
 * Does the stack grow up (toward higher addresses) or down
 * (toward lower addresses)?
 *
 * If TRUE, then the stack grows upward.
 * If FALSE, then the stack grows toward smaller addresses.
 *
 * The stack grows to lower addresses on the SPARC.
 */

#define CPU_STACK_GROWS_UP FALSE

/* FIXME: Is this the right value? */
#define CPU_CACHE_LINE_BYTES 32

/*
 * The following is the variable attribute used to force alignment
 * of critical data structures. On some processors it may make
 * sense to have these aligned on tighter boundaries than
 * the minimum requirements of the compiler in order to have as
 * much of the critical data area as possible in a cache line.
 *
 * The SPARC does not appear to have particularly strict alignment
 * requirements. This value (16) was chosen to take advantage of caches.
 *
 * The SPARC64 requirement on floating point alignment is at least 8,
 * and is 16 if quad-word FP instructions are available (e.g. LDQF).
 */

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED( 16 )
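
/*
 * Illustrative sketch (assumption, not from the original source): the
 * attribute is applied to data whose layout benefits from cache-line
 * friendly alignment.  The object name is hypothetical.
 *
 *   static Context_Control _Example_saved_context CPU_STRUCTURE_ALIGNMENT;
 */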

/*
 * The following defines the number of bits actually used in the
 * interrupt field of the task mode. How those bits map to the
 * CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 *
 * The SPARC V9 has 16 interrupt levels, selected via the 4-bit PIL
 * (processor interrupt level) register.
 */

#define CPU_MODES_INTERRUPT_MASK 0x0000000F

#define CPU_MAXIMUM_PROCESSORS 32

/*
 * This structure represents the organization of the minimum stack frame
 * for the SPARC. More framing information is required in certain situations,
 * such as when there are a large number of out parameters or when the callee
 * must save floating point registers.
 */

#ifndef ASM

typedef struct {
  uint64_t l0;
  uint64_t l1;
  uint64_t l2;
  uint64_t l3;
  uint64_t l4;
  uint64_t l5;
  uint64_t l6;
  uint64_t l7;
  uint64_t i0;
  uint64_t i1;
  uint64_t i2;
  uint64_t i3;
  uint64_t i4;
  uint64_t i5;
  uint64_t i6_fp;
  uint64_t i7;
  void *structure_return_address;
  /*
   * The following are for the callee to save the register arguments in,
   * should this be necessary.
   */
  uint64_t saved_arg0;
  uint64_t saved_arg1;
  uint64_t saved_arg2;
  uint64_t saved_arg3;
  uint64_t saved_arg4;
  uint64_t saved_arg5;
  uint64_t pad0;
} SPARC64_Minimum_stack_frame;

#endif /* !ASM */

#define CPU_STACK_FRAME_L0_OFFSET 0x00
#define CPU_STACK_FRAME_L1_OFFSET 0x08
#define CPU_STACK_FRAME_L2_OFFSET 0x10
#define CPU_STACK_FRAME_L3_OFFSET 0x18
#define CPU_STACK_FRAME_L4_OFFSET 0x20
#define CPU_STACK_FRAME_L5_OFFSET 0x28
#define CPU_STACK_FRAME_L6_OFFSET 0x30
#define CPU_STACK_FRAME_L7_OFFSET 0x38
#define CPU_STACK_FRAME_I0_OFFSET 0x40
#define CPU_STACK_FRAME_I1_OFFSET 0x48
#define CPU_STACK_FRAME_I2_OFFSET 0x50
#define CPU_STACK_FRAME_I3_OFFSET 0x58
#define CPU_STACK_FRAME_I4_OFFSET 0x60
#define CPU_STACK_FRAME_I5_OFFSET 0x68
#define CPU_STACK_FRAME_I6_FP_OFFSET 0x70
#define CPU_STACK_FRAME_I7_OFFSET 0x78
#define CPU_STRUCTURE_RETURN_ADDRESS_OFFSET 0x80
#define CPU_STACK_FRAME_SAVED_ARG0_OFFSET 0x88
#define CPU_STACK_FRAME_SAVED_ARG1_OFFSET 0x90
#define CPU_STACK_FRAME_SAVED_ARG2_OFFSET 0x98
#define CPU_STACK_FRAME_SAVED_ARG3_OFFSET 0xA0
#define CPU_STACK_FRAME_SAVED_ARG4_OFFSET 0xA8
#define CPU_STACK_FRAME_SAVED_ARG5_OFFSET 0xB0
#define CPU_STACK_FRAME_PAD0_OFFSET 0xB8

#define SPARC64_MINIMUM_STACK_FRAME_SIZE 0xC0
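
/*
 * Consistency sketch (assumption, not from the original source): the
 * offsets above are expected to mirror the C layout of
 * SPARC64_Minimum_stack_frame, which a port check could verify with
 * offsetof(), for example:
 *
 *   #include <stddef.h>
 *
 *   RTEMS_STATIC_ASSERT(
 *     offsetof( SPARC64_Minimum_stack_frame, i6_fp )
 *       == CPU_STACK_FRAME_I6_FP_OFFSET,
 *     SPARC64_Minimum_stack_frame_i6_fp
 *   );
 */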

/*
 * Contexts
 *
 * Generally there are 2 types of context to save.
 * 1. Interrupt registers to save
 * 2. Task level registers to save
 *
 * This means we have the following 3 context items:
 * 1. task level context: Context_Control
 * 2. floating point task context: Context_Control_fp
 * 3. special interrupt level context: Context_Control_interrupt
 *
 * On the SPARC, we are relatively conservative in that we save most
 * of the CPU state in the context area. The ET (enable trap) bit and
 * the CWP (current window pointer) fields of the PSR are considered
 * system wide resources and are not maintained on a per-thread basis.
 */

#ifndef ASM

typedef struct {
  uint64_t g1;
  uint64_t g2;
  uint64_t g3;
  uint64_t g4;
  uint64_t g5;
  uint64_t g6;
  uint64_t g7;

  uint64_t l0;
  uint64_t l1;
  uint64_t l2;
  uint64_t l3;
  uint64_t l4;
  uint64_t l5;
  uint64_t l6;
  uint64_t l7;

  uint64_t i0;
  uint64_t i1;
  uint64_t i2;
  uint64_t i3;
  uint64_t i4;
  uint64_t i5;
  uint64_t i6_fp;
  uint64_t i7;

  uint64_t o0;
  uint64_t o1;
  uint64_t o2;
  uint64_t o3;
  uint64_t o4;
  uint64_t o5;
  uint64_t o6_sp;
  uint64_t o7;

  uint32_t isr_dispatch_disable;
  uint32_t pad;
} Context_Control;

#define _CPU_Context_Get_SP( _context ) \
  (_context)->o6_sp

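/*
 * Usage sketch (assumption, not from the original source): reading the
 * saved stack pointer out of a suspended thread's context, e.g. from a
 * stack checker.  The variable names are hypothetical.
 *
 *   Context_Control *ctx = ...;                  saved thread context
 *   uint64_t sp = _CPU_Context_Get_SP( ctx );
 */
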
#endif /* ASM */

/*
 * Offsets of fields within Context_Control for assembly routines.
 */

#define G1_OFFSET 0x00
#define G2_OFFSET 0x08
#define G3_OFFSET 0x10
#define G4_OFFSET 0x18
#define G5_OFFSET 0x20
#define G6_OFFSET 0x28
#define G7_OFFSET 0x30

#define L0_OFFSET 0x38
#define L1_OFFSET 0x40
#define L2_OFFSET 0x48
#define L3_OFFSET 0x50
#define L4_OFFSET 0x58
#define L5_OFFSET 0x60
#define L6_OFFSET 0x68
#define L7_OFFSET 0x70

#define I0_OFFSET 0x78
#define I1_OFFSET 0x80
#define I2_OFFSET 0x88
#define I3_OFFSET 0x90
#define I4_OFFSET 0x98
#define I5_OFFSET 0xA0
#define I6_FP_OFFSET 0xA8
#define I7_OFFSET 0xB0

#define O0_OFFSET 0xB8
#define O1_OFFSET 0xC0
#define O2_OFFSET 0xC8
#define O3_OFFSET 0xD0
#define O4_OFFSET 0xD8
#define O5_OFFSET 0xE0
#define O6_SP_OFFSET 0xE8
#define O7_OFFSET 0xF0

#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0xF8
#define ISR_PAD_OFFSET 0xFC

/*
 * The floating point context area.
 */

#ifndef ASM

typedef struct {
  double f0;     /* f0-f1 */
  double f2;     /* f2-f3 */
  double f4;     /* f4-f5 */
  double f6;     /* f6-f7 */
  double f8;     /* f8-f9 */
  double f10;    /* f10-f11 */
  double f12;    /* f12-f13 */
  double f14;    /* f14-f15 */
  double f16;    /* f16-f17 */
  double f18;    /* f18-f19 */
  double f20;    /* f20-f21 */
  double f22;    /* f22-f23 */
  double f24;    /* f24-f25 */
  double f26;    /* f26-f27 */
  double f28;    /* f28-f29 */
  double f30;    /* f30-f31 */
  double f32;
  double f34;
  double f36;
  double f38;
  double f40;
  double f42;
  double f44;
  double f46;
  double f48;
  double f50;
  double f52;
  double f54;
  double f56;
  double f58;
  double f60;
  double f62;
  uint64_t fsr;
} Context_Control_fp;

#endif /* !ASM */

/*
 * Offsets of fields within Context_Control_fp for assembly routines.
 * (Note that several of these macro names, e.g. FO and F1O, historically
 * use the letter 'O' rather than the digit '0'.)
 */

#define FO_OFFSET  0x00
#define F2_OFFSET  0x08
#define F4_OFFSET  0x10
#define F6_OFFSET  0x18
#define F8_OFFSET  0x20
#define F1O_OFFSET 0x28
#define F12_OFFSET 0x30
#define F14_OFFSET 0x38
#define F16_OFFSET 0x40
#define F18_OFFSET 0x48
#define F2O_OFFSET 0x50
#define F22_OFFSET 0x58
#define F24_OFFSET 0x60
#define F26_OFFSET 0x68
#define F28_OFFSET 0x70
#define F3O_OFFSET 0x78
#define F32_OFFSET 0x80
#define F34_OFFSET 0x88
#define F36_OFFSET 0x90
#define F38_OFFSET 0x98
#define F4O_OFFSET 0xA0
#define F42_OFFSET 0xA8
#define F44_OFFSET 0xB0
#define F46_OFFSET 0xB8
#define F48_OFFSET 0xC0
#define F5O_OFFSET 0xC8
#define F52_OFFSET 0xD0
#define F54_OFFSET 0xD8
#define F56_OFFSET 0xE0
#define F58_OFFSET 0xE8
#define F6O_OFFSET 0xF0
#define F62_OFFSET 0xF8
#define FSR_OFFSET 0x100

#define CONTEXT_CONTROL_FP_SIZE 0x108
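
/*
 * Consistency sketch (assumption, not from the original source): the FP
 * context holds 32 double entries plus the 64-bit FSR, so its size should
 * match the constant above, e.g.:
 *
 *   RTEMS_STATIC_ASSERT(
 *     sizeof( Context_Control_fp ) == CONTEXT_CONTROL_FP_SIZE,
 *     Context_Control_fp_size
 *   );
 */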

#ifndef ASM

/*
 * Context saved on the stack for an interrupt.
 *
 * NOTE: The tstate, tpc, and tnpc are saved in this structure
 * to allow resetting the TL while still being able to return
 * from a trap later. The PIL is saved because, if this is an
 * external interrupt, we will mask lower priority interrupts
 * until finishing. Even though the y register is deprecated,
 * gcc still uses it.
 */

typedef struct {
  SPARC64_Minimum_stack_frame Stack_frame;
  uint64_t tstate;
  uint64_t tpc;
  uint64_t tnpc;
  uint64_t pil;
  uint64_t y;
  uint64_t g1;
  uint64_t g2;
  uint64_t g3;
  uint64_t g4;
  uint64_t g5;
  uint64_t g6;
  uint64_t g7;
  uint64_t o0;
  uint64_t o1;
  uint64_t o2;
  uint64_t o3;
  uint64_t o4;
  uint64_t o5;
  uint64_t o6_sp;
  uint64_t o7;
  uint64_t tvec;
} CPU_Interrupt_frame;

#endif /* ASM */

/*
 * Offsets of fields within CPU_Interrupt_frame for assembly routines.
 */

#define ISF_TSTATE_OFFSET SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x00
#define ISF_TPC_OFFSET    SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x08
#define ISF_TNPC_OFFSET   SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x10
#define ISF_PIL_OFFSET    SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x18
#define ISF_Y_OFFSET      SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x20
#define ISF_G1_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x28
#define ISF_G2_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x30
#define ISF_G3_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x38
#define ISF_G4_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x40
#define ISF_G5_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x48
#define ISF_G6_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x50
#define ISF_G7_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x58
#define ISF_O0_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x60
#define ISF_O1_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x68
#define ISF_O2_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x70
#define ISF_O3_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x78
#define ISF_O4_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x80
#define ISF_O5_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x88
#define ISF_O6_SP_OFFSET  SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x90
#define ISF_O7_OFFSET     SPARC64_MINIMUM_STACK_FRAME_SIZE + 0x98
#define ISF_TVEC_OFFSET   SPARC64_MINIMUM_STACK_FRAME_SIZE + 0xA0

#define CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE SPARC64_MINIMUM_STACK_FRAME_SIZE + 0xA8
#ifndef ASM
/*
 * This variable contains the initial context for the FP unit.  It is
 * filled in by _CPU_Initialize and copied into the task's FP context
 * area during _CPU_Context_Initialize.
 */

extern Context_Control_fp _CPU_Null_fp_context;

/*
 * This flag is context switched with each thread. It indicates
 * that THIS thread has an interrupt stack frame on its stack.
 * By using this flag, we can avoid nesting more interrupt dispatching
 * attempts on a previously interrupted thread's stack.
 */

extern volatile uint32_t _CPU_ISR_Dispatch_disable;

/*
 * The following type defines an entry in the SPARC's trap table.
 *
 * NOTE: The instructions chosen are RTEMS dependent, although one is
 * obligated to use two of the four instructions to perform a
 * long jump. The other instructions load one register with the
 * trap type (a.k.a. vector) and another with the psr.
 */
/* For SPARC V9, we must use 6 of these instructions to perform a long
 * jump, because the _handler value is now 64-bits. We also need to store
 * temporary values in the global register set at this trap level. Because
 * the handler runs at TL > 0 with GL > 0, it should be OK to use g2 and g3
 * to pass parameters to ISR_Handler.
 *
 * The instruction sequence is now more like:
 *      rdpr %tstate, %g4
 *      setx _handler, %g2, %g3
 *      jmp %g3+0
 *      mov _vector, %g2
 */
typedef struct {
  uint32_t rdpr_tstate_g4;                 /* rdpr  %tstate, %g4            */
  uint32_t sethi_of_hh_handler_to_g2;      /* sethi %hh(_handler), %g2      */
  uint32_t or_g2_hm_handler_to_g2;         /* or    %g2, %hm(_handler), %g2 */
  uint32_t sllx_g2_by_32_to_g2;            /* sllx  %g2, 32, %g2            */
  uint32_t sethi_of_handler_to_g3;         /* sethi %hi(_handler), %g3      */
  uint32_t or_g3_g2_to_g3;                 /* or    %g3, %g2, %g3           */
  uint32_t jmp_to_low_of_handler_plus_g3;  /* jmp   %g3 + %lo(_handler)     */
  uint32_t mov_vector_g2;                  /* mov   _vector, %g2            */
} CPU_Trap_table_entry;
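
/*
 * Decomposition sketch (assumption, not from the original source): how the
 * setx-style sequence above rebuilds the 64-bit _handler address.  With
 * h = (uint64_t) _handler, the register contents evolve roughly as:
 *
 *   g2  = (h >> 42) << 10;               sethi %hh(_handler), %g2
 *   g2 |= (h >> 32) & 0x3ff;             or    %g2, %hm(_handler), %g2
 *   g2 <<= 32;                           sllx  %g2, 32, %g2
 *   g3  = ((h >> 10) & 0x3fffff) << 10;  sethi %hi(_handler), %g3
 *   g3 |= g2;                            or    %g3, %g2, %g3
 *   target = g3 + (h & 0x3ff);           jmp   %g3 + %lo(_handler)
 */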

/*
 * This is the set of opcodes for the instructions loaded into a trap
 * table entry. The routine which installs a handler is responsible
 * for filling in the fields for the _handler address and the _vector
 * trap type.
 *
 * The constants following this structure are masks for the fields which
 * must be filled in when the handler is installed.
 */

extern const CPU_Trap_table_entry _CPU_Trap_slot_template;

/*
 * The size of the floating point context area.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

#endif

/*
 * Amount of extra stack (above minimum stack size) required by
 * MPCI receive server thread. Remember that in a multiprocessor
 * system this thread must exist and be able to process all directives.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 1024

/*
 * This defines the number of entries in the ISR_Vector_table managed
 * by the executive.
 *
 * On the SPARC, there are really only 256 vectors. However, the executive
 * has no easy, fast, reliable way to determine which traps are synchronous
 * and which are asynchronous. By default, synchronous traps return to the
 * instruction which caused the interrupt. So if you install a software
 * trap handler as an executive interrupt handler (which is desirable since
 * RTEMS takes care of window and register issues), then the executive needs
 * to know that the return address is to the trap rather than the instruction
 * following the trap.
 *
 * So vectors 0 through 255 are treated as regular asynchronous traps which
 * provide the "correct" return address. Vectors 256 through 511 are assumed
 * by the executive to be synchronous and to require that the return address
 * be fudged.
 *
 * If you use this mechanism to install a trap handler which must re-execute
 * the instruction which caused the trap, then it should be installed as
 * an asynchronous trap. This will avoid the executive changing the return
 * address.
 */
/* On SPARC V9, there are 512 vectors. The same philosophy applies to
 * vector installation and use; we just provide a larger table.
 */
#define CPU_INTERRUPT_NUMBER_OF_VECTORS 512
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER 1023

#define SPARC_SYNCHRONOUS_TRAP_BIT_MASK 0x200
#define SPARC_ASYNCHRONOUS_TRAP( _trap ) (_trap)
#define SPARC_SYNCHRONOUS_TRAP( _trap ) ((_trap) + 512 )

#define SPARC_REAL_TRAP_NUMBER( _trap ) ((_trap) % 512)
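
/*
 * Numbering sketch (illustrative; trap 0x24 is just an example value):
 *
 *   SPARC_ASYNCHRONOUS_TRAP( 0x24 )  == 0x024
 *   SPARC_SYNCHRONOUS_TRAP( 0x24 )   == 0x224   (0x24 + 512)
 *   SPARC_REAL_TRAP_NUMBER( 0x224 )  == 0x024   (0x224 % 512)
 *   (0x224 & SPARC_SYNCHRONOUS_TRAP_BIT_MASK) != 0, i.e. a synchronous entry
 */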

/*
 * This is defined if the port has a special way to report the ISR nesting
 * level. Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 * Should be large enough to run all tests. This ensures
 * that a "reasonable" small application should not have any problems.
 *
 * This appears to be a fairly generous number for the SPARC since it
 * represents a call depth of about 20 routines based on the minimum
 * stack frame.
 */

#define CPU_STACK_MINIMUM_SIZE (1024*8)

#define CPU_SIZEOF_POINTER 8

/*
 * CPU's worst alignment requirement for data types on a byte boundary. This
 * alignment does not take into account the requirements for the stack.
 *
 * On the SPARC, this is required for double word loads and stores.
 *
 * Note: quad-word loads/stores need an alignment of 16, but currently
 * supported architectures do not provide hardware-implemented quad-word
 * operations.
 */

#define CPU_ALIGNMENT 8

/*
 * This number corresponds to the byte alignment requirement for the
 * heap handler. This alignment requirement may be stricter than that
 * for the data types alignment specified by CPU_ALIGNMENT. It is
 * common for the heap to follow the same alignment requirement as
 * CPU_ALIGNMENT. If the CPU_ALIGNMENT is strict enough for the heap,
 * then this should be set to CPU_ALIGNMENT.
 *
 * NOTE: This does not have to be a power of 2. It does have to
 * be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT

/*
 * This number corresponds to the byte alignment requirement for the
 * stack. This alignment requirement may be stricter than that for the
 * data types alignment specified by CPU_ALIGNMENT. If the CPU_ALIGNMENT
 * is strict enough for the stack, then this should be set to 0.
 *
 * NOTE: This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 *
 * The alignment restrictions for the SPARC are not that strict, but this
 * should ensure that the stack is always sufficiently aligned that the
 * window overflow, underflow, and flush routines can use double word loads
 * and stores.
 */

#define CPU_STACK_ALIGNMENT 16

#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES

#ifndef ASM

/*
 * ISR handler macros
 */

/*
 * Support routine to initialize the RTEMS vector table after it is allocated.
 */

#define _CPU_Initialize_vectors()

/*
 * Disable all interrupts for a critical section. The previous
 * level is returned in _level.
 */

#define _CPU_ISR_Disable( _level ) \
  (_level) = sparc_disable_interrupts()

/*
 * Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 * This indicates the end of a critical section. The parameter
 * _level is not modified.
 */

#define _CPU_ISR_Enable( _level ) \
  sparc_enable_interrupts( _level )

/*
 * This temporarily restores interrupts to _level before immediately
 * disabling them again. This is used to divide long critical
 * sections into two or more parts. The parameter _level is not
 * modified.
 */

#define _CPU_ISR_Flash( _level ) \
  sparc_flash_interrupts( _level )

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled( uint32_t level )
{
  return ( level & SPARC_PSTATE_IE_MASK ) != 0;
}
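
/*
 * Usage sketch (assumption, not from the original source): a typical
 * interrupt-disabled critical section built from the macros above.
 * The variable name is hypothetical.
 *
 *   uint32_t level;
 *
 *   _CPU_ISR_Disable( level );
 *     ... first part of the critical section ...
 *   _CPU_ISR_Flash( level );       briefly lets pending interrupts in
 *     ... second part of the critical section ...
 *   _CPU_ISR_Enable( level );
 */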

/*
 * Map interrupt level in task mode onto the hardware that the CPU
 * actually provides. Currently, interrupt levels which do not
 * map onto the CPU in a straight fashion are undefined.
 */

#define _CPU_ISR_Set_level( _newlevel ) \
  sparc_enable_interrupts( _newlevel )

uint32_t _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 * Initialize the context to a state suitable for starting a
 * task after a context restore operation. Generally, this
 * involves:
 *
 * - setting a starting address
 * - preparing the stack
 * - preparing the stack and frame pointers
 * - setting the proper interrupt level in the context
 * - initializing the floating point context
 *
 * NOTE: Implemented as a subroutine for the SPARC port.
 */

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_base,
  uint32_t size,
  uint32_t new_level,
  void *entry_point,
  bool is_fp,
  void *tls_area
);

/*
 * This macro is invoked from _Thread_Handler to do whatever CPU
 * specific magic is required that must be done in the context of
 * the thread when it starts.
 *
 * On the SPARC, this is setting the frame pointer so GDB is happy.
 * It makes GDB stop unwinding at _Thread_Handler: the previous register
 * window's frame pointer is 0 and the calling address must be within a
 * function that starts with a SAVE instruction. If the return address
 * is in a leaf function (no SAVE), GDB will not look at the previous
 * register window's frame pointer.
 *
 * _Thread_Handler is known to start with SAVE.
 */

#define _CPU_Context_Initialization_at_thread_begin() \
  do { \
    __asm__ volatile ("set _Thread_Handler,%%i7\n"::); \
  } while (0)

/*
 * This routine is responsible for somehow restarting the currently
 * executing task.
 *
 * On the SPARC, this is relatively painless but requires a small
 * amount of wrapper code before using the regular restore code of
 * the context switch.
 */

#define _CPU_Context_Restart_self( _the_context ) \
  _CPU_Context_restore( (_the_context) );

/*
 * This routine initializes the FP context area passed to it.
 *
 * The SPARC allows us to use the simple initialization model
 * in which an "initial" FP context was saved into _CPU_Null_fp_context
 * at CPU initialization and it is simply copied into the destination
 * context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *(*(_destination)) = _CPU_Null_fp_context; \
  } while (0)

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 * This routine copies _error into a known place -- typically a stack
 * location or a register, optionally disables interrupts, and
 * halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _source, _error ) \
  do { \
    uint32_t level; \
    \
    level = sparc_disable_interrupts(); \
    __asm__ volatile ( "mov %0, %%g1 " : "=r" (level) : "0" (level) ); \
    while (1); /* loop forever */ \
  } while (0)

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 * The SPARC port uses the generic C algorithm for bitfield scan if the
 * CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 0 )
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#else
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Bitfield handler macros */

/* Priority handler macros */

/*
 * The SPARC port uses the generic C algorithm for the priority bit map
 * scan if the CPU model does not have a scan instruction.
 */

#if ( SPARC_HAS_BITSCAN == 1 )
#error "scan instruction not currently supported by RTEMS!!"
#endif

/* end of Priority handler macros */

/* functions */

/*
 * _CPU_Initialize
 *
 * This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(void);

typedef void ( *CPU_ISR_raw_handler )( void );

void _CPU_ISR_install_raw_handler(
  uint32_t vector,
  CPU_ISR_raw_handler new_handler,
  CPU_ISR_raw_handler *old_handler
);

typedef void ( *CPU_ISR_handler )( uint32_t );

void _CPU_ISR_install_vector(
  uint32_t vector,
  CPU_ISR_handler new_handler,
  CPU_ISR_handler *old_handler
);

void *_CPU_Thread_Idle_body( uintptr_t ignored );

/*
 * _CPU_Context_switch
 *
 * This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control *run,
  Context_Control *heir
);

/*
 * _CPU_Context_restore
 *
 * This routine is generally used only to restart self in an
 * efficient manner.
 */

void _CPU_Context_restore(
  Context_Control *new_context
) RTEMS_NO_RETURN;

/*
 * _CPU_Context_save_fp
 *
 * This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  Context_Control_fp **fp_context_ptr
);

/*
 * _CPU_Context_restore_fp
 *
 * This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  Context_Control_fp **fp_context_ptr
);

/* FIXME */
typedef CPU_Interrupt_frame CPU_Exception_frame;

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

/*
 * CPU_swap_u32
 *
 * The following routine swaps the endian format of an unsigned int.
 * It must be static because it is referenced indirectly.
 *
 * This version will work on any processor, but if you come across a better
 * way for the SPARC PLEASE use it. The most common way to swap a 32-bit
 * entity as shown below is not any more efficient on the SPARC.
 *
 *   swap least significant two bytes with 16-bit rotate
 *   swap upper and lower 16-bits
 *   swap most significant two bytes with 16-bit rotate
 *
 * It is not obvious how the SPARC can do significantly better than the
 * generic code. gcc 2.7.0 only generates about 12 instructions for the
 * following code at optimization level four (i.e. -O4).
 */

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

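/*
 * Worked example (illustrative values only):
 *
 *   CPU_swap_u32( 0x12345678 ) == 0x78563412
 *   CPU_swap_u16( 0x1234 )     == 0x3412
 */
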
typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
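
/*
 * Worked example (illustrative values only): because the subtraction is
 * done in unsigned 32-bit arithmetic, the difference is still correct
 * when the counter wraps between the two reads:
 *
 *   _CPU_Counter_difference( 0x00000005, 0xFFFFFFFE ) == 7
 */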

typedef uintptr_t CPU_Uint32ptr;

#endif /* ASM */

#ifdef __cplusplus
}
#endif

#endif