/**
 * @file
 *
 * @brief SPARC CPU Dependent Source
 */
/*
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2017 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */
17 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/isr.h>
#include <rtems/score/percpu.h>
#include <rtems/score/thread.h>
#include <rtems/score/tls.h>
#include <rtems/rtems/cache.h>
#if SPARC_HAS_FPU == 1
  /* The FSR save slot offset is hard-coded in assembly; keep them in sync. */
  RTEMS_STATIC_ASSERT(
    offsetof( Per_CPU_Control, cpu_per_cpu.fsr)
      == SPARC_PER_CPU_FSR_OFFSET,
    SPARC_PER_CPU_FSR_OFFSET
  );

  #if defined(SPARC_USE_LAZY_FP_SWITCH)
    /* Lazy FP switching also references the FP owner slot from assembly. */
    RTEMS_STATIC_ASSERT(
      offsetof( Per_CPU_Control, cpu_per_cpu.fp_owner)
        == SPARC_PER_CPU_FP_OWNER_OFFSET,
      SPARC_PER_CPU_FP_OWNER_OFFSET
    );
  #endif
#endif
43 
44 #define SPARC_ASSERT_OFFSET(field, off) \
45  RTEMS_STATIC_ASSERT( \
46  offsetof(Context_Control, field) == off ## _OFFSET, \
47  Context_Control_offset_ ## field \
48  )
49 
50 SPARC_ASSERT_OFFSET(g5, G5);
51 SPARC_ASSERT_OFFSET(g7, G7);
52 
54  offsetof(Context_Control, l0_and_l1) == L0_OFFSET,
55  Context_Control_offset_L0
56 );
57 
59  offsetof(Context_Control, l0_and_l1) + 4 == L1_OFFSET,
60  Context_Control_offset_L1
61 );
62 
63 SPARC_ASSERT_OFFSET(l2, L2);
64 SPARC_ASSERT_OFFSET(l3, L3);
65 SPARC_ASSERT_OFFSET(l4, L4);
66 SPARC_ASSERT_OFFSET(l5, L5);
67 SPARC_ASSERT_OFFSET(l6, L6);
68 SPARC_ASSERT_OFFSET(l7, L7);
69 SPARC_ASSERT_OFFSET(i0, I0);
70 SPARC_ASSERT_OFFSET(i1, I1);
71 SPARC_ASSERT_OFFSET(i2, I2);
72 SPARC_ASSERT_OFFSET(i3, I3);
73 SPARC_ASSERT_OFFSET(i4, I4);
74 SPARC_ASSERT_OFFSET(i5, I5);
75 SPARC_ASSERT_OFFSET(i6_fp, I6_FP);
76 SPARC_ASSERT_OFFSET(i7, I7);
77 SPARC_ASSERT_OFFSET(o6_sp, O6_SP);
78 SPARC_ASSERT_OFFSET(o7, O7);
79 SPARC_ASSERT_OFFSET(psr, PSR);
80 SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);
81 
82 #if defined(RTEMS_SMP)
83 SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
84 #endif
85 
86 #define SPARC_ASSERT_ISF_OFFSET(field, off) \
87  RTEMS_STATIC_ASSERT( \
88  offsetof(CPU_Interrupt_frame, field) == ISF_ ## off ## _OFFSET, \
89  CPU_Interrupt_frame_offset_ ## field \
90  )
91 
92 SPARC_ASSERT_ISF_OFFSET(psr, PSR);
93 SPARC_ASSERT_ISF_OFFSET(pc, PC);
94 SPARC_ASSERT_ISF_OFFSET(npc, NPC);
95 SPARC_ASSERT_ISF_OFFSET(g1, G1);
96 SPARC_ASSERT_ISF_OFFSET(g2, G2);
97 SPARC_ASSERT_ISF_OFFSET(g3, G3);
98 SPARC_ASSERT_ISF_OFFSET(g4, G4);
99 SPARC_ASSERT_ISF_OFFSET(g5, G5);
100 SPARC_ASSERT_ISF_OFFSET(g7, G7);
101 SPARC_ASSERT_ISF_OFFSET(i0, I0);
102 SPARC_ASSERT_ISF_OFFSET(i1, I1);
103 SPARC_ASSERT_ISF_OFFSET(i2, I2);
104 SPARC_ASSERT_ISF_OFFSET(i3, I3);
105 SPARC_ASSERT_ISF_OFFSET(i4, I4);
106 SPARC_ASSERT_ISF_OFFSET(i5, I5);
107 SPARC_ASSERT_ISF_OFFSET(i6_fp, I6_FP);
108 SPARC_ASSERT_ISF_OFFSET(i7, I7);
109 SPARC_ASSERT_ISF_OFFSET(y, Y);
110 SPARC_ASSERT_ISF_OFFSET(tpc, TPC);
111 
112 #define SPARC_ASSERT_FP_OFFSET(field, off) \
113  RTEMS_STATIC_ASSERT( \
114  offsetof(Context_Control_fp, field) == SPARC_FP_CONTEXT_OFFSET_ ## off, \
115  Context_Control_fp_offset_ ## field \
116  )
117 
118 SPARC_ASSERT_FP_OFFSET(f0_f1, F0_F1);
119 SPARC_ASSERT_FP_OFFSET(f2_f3, F2_F3);
120 SPARC_ASSERT_FP_OFFSET(f4_f5, F4_F5);
121 SPARC_ASSERT_FP_OFFSET(f6_f7, F6_F7);
122 SPARC_ASSERT_FP_OFFSET(f8_f9, F8_F9);
123 SPARC_ASSERT_FP_OFFSET(f10_f11, F10_F11);
124 SPARC_ASSERT_FP_OFFSET(f12_f13, F12_F13);
125 SPARC_ASSERT_FP_OFFSET(f14_f15, F14_F15);
126 SPARC_ASSERT_FP_OFFSET(f16_f17, F16_F17);
127 SPARC_ASSERT_FP_OFFSET(f18_f19, F18_F19);
128 SPARC_ASSERT_FP_OFFSET(f20_f21, F20_F21);
129 SPARC_ASSERT_FP_OFFSET(f22_f23, F22_F23);
130 SPARC_ASSERT_FP_OFFSET(f24_f25, F24_F25);
131 SPARC_ASSERT_FP_OFFSET(f26_f27, F26_F27);
132 SPARC_ASSERT_FP_OFFSET(f28_f29, F28_F29);
133 SPARC_ASSERT_FP_OFFSET(f30_f31, F30_F31);
134 SPARC_ASSERT_FP_OFFSET(fsr, FSR);
135 
139 );
140 
141 /* https://devel.rtems.org/ticket/2352 */
143  sizeof(CPU_Interrupt_frame) % CPU_ALIGNMENT == 0,
144  CPU_Interrupt_frame_alignment
145 );
146 
147 /*
148  * This initializes the set of opcodes placed in each trap
149  * table entry. The routine which installs a handler is responsible
150  * for filling in the fields for the _handler address and the _vector
151  * trap type.
152  *
153  * The constants following this structure are masks for the fields which
154  * must be filled in when the handler is installed.
155  */
157  0xa1480000, /* mov %psr, %l0 */
158  0x29000000, /* sethi %hi(_handler), %l4 */
159  0x81c52000, /* jmp %l4 + %lo(_handler) */
160  0xa6102000 /* mov _vector, %l3 */
161 };
162 
/*
 *  _CPU_Initialize
 *
 *  This routine performs processor dependent initialization.
 *
 *  INPUT PARAMETERS:  NONE
 *
 *  Output Parameters: NONE
 *
 *  NOTE: There is no need to save the pointer to the thread dispatch routine.
 *        The SPARC's assembly code can reference it directly with no problems.
 */

void _CPU_Initialize(void)
{
#if defined(SPARC_USE_LAZY_FP_SWITCH)
  /*
   * Export the Thread_Control offsets needed by the lazy floating point
   * switch assembly code as assembler symbols; the "i" constraints make
   * them compile-time constants.
   */
  __asm__ volatile (
    ".global SPARC_THREAD_CONTROL_REGISTERS_FP_CONTEXT_OFFSET\n"
    ".set SPARC_THREAD_CONTROL_REGISTERS_FP_CONTEXT_OFFSET, %0\n"
    ".global SPARC_THREAD_CONTROL_FP_CONTEXT_OFFSET\n"
    ".set SPARC_THREAD_CONTROL_FP_CONTEXT_OFFSET, %1\n"
    :
    : "i" (offsetof(Thread_Control, Registers.fp_context)),
      "i" (offsetof(Thread_Control, fp_context))
  );
#endif
}
190 
/*
 * Obtain the current interrupt disable level via the PIL field read by the
 * sparc_get_interrupt_level() macro.
 */
uint32_t _CPU_ISR_Get_level( void )
{
  uint32_t level;

  sparc_get_interrupt_level( level );

  return level;
}
199 
200 /*
201  * _CPU_ISR_install_raw_handler
202  *
203  * This routine installs the specified handler as a "raw" non-executive
204  * supported trap handler (a.k.a. interrupt service routine).
205  *
206  * Input Parameters:
207  * vector - trap table entry number plus synchronous
208  * vs. asynchronous information
209  * new_handler - address of the handler to be installed
210  * old_handler - pointer to an address of the handler previously installed
211  *
212  * Output Parameters: NONE
213  * *new_handler - address of the handler previously installed
214  *
215  * NOTE:
216  *
217  * On the SPARC, there are really only 256 vectors. However, the executive
218  * has no easy, fast, reliable way to determine which traps are synchronous
219  * and which are asynchronous. By default, synchronous traps return to the
220  * instruction which caused the interrupt. So if you install a software
221  * trap handler as an executive interrupt handler (which is desirable since
222  * RTEMS takes care of window and register issues), then the executive needs
223  * to know that the return address is to the trap rather than the instruction
224  * following the trap.
225  *
226  * So vectors 0 through 255 are treated as regular asynchronous traps which
227  * provide the "correct" return address. Vectors 256 through 512 are assumed
228  * by the executive to be synchronous and to require that the return address
229  * be fudged.
230  *
231  * If you use this mechanism to install a trap handler which must reexecute
232  * the instruction which caused the trap, then it should be installed as
233  * an asynchronous trap. This will avoid the executive changing the return
234  * address.
235  */
236 
238  uint32_t vector,
239  CPU_ISR_raw_handler new_handler,
240  CPU_ISR_raw_handler *old_handler
241 )
242 {
243  uint32_t real_vector;
245  CPU_Trap_table_entry *slot;
246  uint32_t u32_tbr;
247  uint32_t u32_handler;
248 
249  /*
250  * Get the "real" trap number for this vector ignoring the synchronous
251  * versus asynchronous indicator included with our vector numbers.
252  */
253 
254  real_vector = SPARC_REAL_TRAP_NUMBER( vector );
255 
256  /*
257  * Get the current base address of the trap table and calculate a pointer
258  * to the slot we are interested in.
259  */
260 
261  sparc_get_tbr( u32_tbr );
262 
263  u32_tbr &= 0xfffff000;
264 
265  tbr = (CPU_Trap_table_entry *) u32_tbr;
266 
267  slot = &tbr[ real_vector ];
268 
269  /*
270  * Get the address of the old_handler from the trap table.
271  *
272  * NOTE: The old_handler returned will be bogus if it does not follow
273  * the RTEMS model.
274  */
275 
276 #define HIGH_BITS_MASK 0xFFFFFC00
277 #define HIGH_BITS_SHIFT 10
278 #define LOW_BITS_MASK 0x000003FF
279 
281  u32_handler =
282  (slot->sethi_of_handler_to_l4 << HIGH_BITS_SHIFT) |
283  (slot->jmp_to_low_of_handler_plus_l4 & LOW_BITS_MASK);
284  *old_handler = (CPU_ISR_raw_handler) u32_handler;
285  } else
286  *old_handler = 0;
287 
288  /*
289  * Copy the template to the slot and then fix it.
290  */
291 
292  *slot = _CPU_Trap_slot_template;
293 
294  u32_handler = (uint32_t) new_handler;
295 
296  slot->mov_vector_l3 |= vector;
297  slot->sethi_of_handler_to_l4 |=
298  (u32_handler & HIGH_BITS_MASK) >> HIGH_BITS_SHIFT;
299  slot->jmp_to_low_of_handler_plus_l4 |= (u32_handler & LOW_BITS_MASK);
300 
301  /*
302  * There is no instruction cache snooping, so we need to invalidate
303  * the instruction cache to make sure that the processor sees the
304  * changes to the trap table. This step is required on both single-
305  * and multiprocessor systems.
306  *
307  * In a SMP configuration a change to the trap table might be
308  * missed by other cores. If the system state is up, the other
309  * cores can be notified using SMP messages that they need to
310  * flush their icache. If the up state has not been reached
311  * there is no need to notify other cores. They will do an
312  * automatic flush of the icache just after entering the up
313  * state, but before enabling interrupts.
314  */
316 }
317 
319  uint32_t vector,
320  CPU_ISR_handler new_handler,
321  CPU_ISR_handler *old_handler
322 )
323 {
324  uint32_t real_vector;
325  CPU_ISR_raw_handler ignored;
326 
327  /*
328  * Get the "real" trap number for this vector ignoring the synchronous
329  * versus asynchronous indicator included with our vector numbers.
330  */
331 
332  real_vector = SPARC_REAL_TRAP_NUMBER( vector );
333 
334  /*
335  * Return the previous ISR handler.
336  */
337 
338  *old_handler = _ISR_Vector_table[ real_vector ];
339 
340  /*
341  * Install the wrapper so this ISR can be invoked properly.
342  */
343 
344  _CPU_ISR_install_raw_handler( vector, _ISR_Handler, &ignored );
345 
346  /*
347  * We put the actual user ISR address in '_ISR_vector_table'. This will
348  * be used by the _ISR_Handler so the user gets control.
349  */
350 
351  _ISR_Vector_table[ real_vector ] = new_handler;
352 }
353 
355  Context_Control *the_context,
356  uint32_t *stack_base,
357  uint32_t size,
358  uint32_t new_level,
359  void *entry_point,
360  bool is_fp,
361  void *tls_area
362 )
363 {
364  uint32_t stack_high; /* highest "stack aligned" address */
365  uint32_t tmp_psr;
366 
367  /*
368  * On CPUs with stacks which grow down (i.e. SPARC), we build the stack
369  * based on the stack_high address.
370  */
371 
372  stack_high = ((uint32_t)(stack_base) + size);
373  stack_high &= ~(CPU_STACK_ALIGNMENT - 1);
374 
375  /*
376  * See the README in this directory for a diagram of the stack.
377  */
378 
379  the_context->o7 = ((uint32_t) entry_point) - 8;
380  the_context->o6_sp = stack_high - SPARC_MINIMUM_STACK_FRAME_SIZE;
381  the_context->i6_fp = 0;
382 
383  /*
384  * Build the PSR for the task. Most everything can be 0 and the
385  * CWP is corrected during the context switch.
386  *
387  * The EF bit determines if the floating point unit is available.
388  * The FPU is ONLY enabled if the context is associated with an FP task
389  * and this SPARC model has an FPU.
390  */
391 
392  sparc_get_psr( tmp_psr );
393  tmp_psr &= ~SPARC_PSR_PIL_MASK;
394  tmp_psr |= (new_level << 8) & SPARC_PSR_PIL_MASK;
395  tmp_psr &= ~SPARC_PSR_EF_MASK; /* disabled by default */
396 
397  /* _CPU_Context_restore_heir() relies on this */
398  _Assert( ( tmp_psr & SPARC_PSR_ET_MASK ) != 0 );
399 
400 #if (SPARC_HAS_FPU == 1)
401  /*
402  * If this bit is not set, then a task gets a fault when it accesses
403  * a floating point register. This is a nice way to detect floating
404  * point tasks which are not currently declared as such.
405  */
406 
407  if ( is_fp )
408  tmp_psr |= SPARC_PSR_EF_MASK;
409 #endif
410  the_context->psr = tmp_psr;
411 
412  /*
413  * Since THIS thread is being created, there is no way that THIS
414  * thread can have an interrupt stack frame on its stack.
415  */
416  the_context->isr_dispatch_disable = 0;
417 
418  if ( tls_area != NULL ) {
419  void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );
420 
421  the_context->g7 = (uintptr_t) tcb;
422  }
423 }