/**
 * @file
 *
 * @brief Dispatch Thread
 *
 * @ingroup RTEMSScoreThread
 */
/*
 * COPYRIGHT (c) 1989-2009.
 * On-Line Applications Research Corporation (OAR).
 *
 * Copyright (c) 2014, 2018 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */
19 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threaddispatch.h>
#include <rtems/score/assert.h>
#include <rtems/score/isr.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/userextimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/config.h>
33 
34 #if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
35 Thread_Control *_Thread_Allocated_fp;
36 #endif
37 
/* List of active task switch extensions. */
CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );
40 #if defined(RTEMS_SMP)
41 static ISR_Level _Thread_Check_pinning(
42  Thread_Control *executing,
43  Per_CPU_Control *cpu_self,
44  ISR_Level level
45 )
46 {
47  unsigned int pin_level;
48 
49  pin_level = executing->Scheduler.pin_level;
50 
51  if (
52  RTEMS_PREDICT_FALSE( pin_level != 0 )
53  && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
54  ) {
55  ISR_lock_Context state_lock_context;
56  ISR_lock_Context scheduler_lock_context;
57  const Scheduler_Control *pinned_scheduler;
58  Scheduler_Node *pinned_node;
59  const Scheduler_Control *home_scheduler;
60 
61  _ISR_Local_enable( level );
62 
63  executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;
64 
65  _Thread_State_acquire( executing, &state_lock_context );
66 
67  pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
69  executing,
70  _Scheduler_Get_index( pinned_scheduler )
71  );
72 
73  if ( _Thread_Is_ready( executing ) ) {
74  _Scheduler_Block( executing);
75  }
76 
77  home_scheduler = _Thread_Scheduler_get_home( executing );
78  executing->Scheduler.pinned_scheduler = pinned_scheduler;
79 
80  if ( home_scheduler != pinned_scheduler ) {
81  _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
83  &executing->Scheduler.Scheduler_nodes,
84  &pinned_node->Thread.Scheduler_node.Chain
85  );
86  }
87 
88  _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );
89 
90  ( *pinned_scheduler->Operations.pin )(
91  pinned_scheduler,
92  executing,
93  pinned_node,
94  cpu_self
95  );
96 
97  if ( _Thread_Is_ready( executing ) ) {
98  ( *pinned_scheduler->Operations.unblock )(
99  pinned_scheduler,
100  executing,
101  pinned_node
102  );
103  }
104 
105  _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );
106 
107  _Thread_State_release( executing, &state_lock_context );
108 
109  _ISR_Local_disable( level );
110  }
111 
112  return level;
113 }
114 
115 static void _Thread_Ask_for_help( Thread_Control *the_thread )
116 {
117  Chain_Node *node;
118  const Chain_Node *tail;
119 
120  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
121  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
122 
123  do {
124  Scheduler_Node *scheduler_node;
125  const Scheduler_Control *scheduler;
126  ISR_lock_Context lock_context;
127  bool success;
128 
129  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
130  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
131 
132  _Scheduler_Acquire_critical( scheduler, &lock_context );
133  success = ( *scheduler->Operations.ask_for_help )(
134  scheduler,
135  the_thread,
136  scheduler_node
137  );
138  _Scheduler_Release_critical( scheduler, &lock_context );
139 
140  if ( success ) {
141  break;
142  }
143 
144  node = _Chain_Next( node );
145  } while ( node != tail );
146 }
147 
148 static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
149 {
150  return executing->Scheduler.helping_nodes > 0
151  && _Thread_Is_ready( executing );
152 }
153 #endif
154 
155 static ISR_Level _Thread_Preemption_intervention(
156  Thread_Control *executing,
157  Per_CPU_Control *cpu_self,
158  ISR_Level level
159 )
160 {
161 #if defined(RTEMS_SMP)
162  ISR_lock_Context lock_context;
163 
164  level = _Thread_Check_pinning( executing, cpu_self, level );
165 
166  _Per_CPU_Acquire( cpu_self, &lock_context );
167 
168  while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
169  Chain_Node *node;
170  Thread_Control *the_thread;
171 
173  _Chain_Set_off_chain( node );
174  the_thread = THREAD_OF_SCHEDULER_HELP_NODE( node );
175 
176  _Per_CPU_Release( cpu_self, &lock_context );
177 
178  _Thread_State_acquire( the_thread, &lock_context );
179  _Thread_Ask_for_help( the_thread );
180  _Thread_State_release( the_thread, &lock_context );
181 
182  _Per_CPU_Acquire( cpu_self, &lock_context );
183  }
184 
185  _Per_CPU_Release( cpu_self, &lock_context );
186 #else
187  (void) cpu_self;
188 #endif
189 
190  return level;
191 }
192 
193 static void _Thread_Post_switch_cleanup( Thread_Control *executing )
194 {
195 #if defined(RTEMS_SMP)
196  Chain_Node *node;
197  const Chain_Node *tail;
198 
199  if ( !_Thread_Can_ask_for_help( executing ) ) {
200  return;
201  }
202 
203  node = _Chain_First( &executing->Scheduler.Scheduler_nodes );
204  tail = _Chain_Immutable_tail( &executing->Scheduler.Scheduler_nodes );
205 
206  do {
207  Scheduler_Node *scheduler_node;
208  const Scheduler_Control *scheduler;
209  ISR_lock_Context lock_context;
210 
211  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
212  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
213 
214  _Scheduler_Acquire_critical( scheduler, &lock_context );
215  ( *scheduler->Operations.reconsider_help_request )(
216  scheduler,
217  executing,
218  scheduler_node
219  );
220  _Scheduler_Release_critical( scheduler, &lock_context );
221 
222  node = _Chain_Next( node );
223  } while ( node != tail );
224 #else
225  (void) executing;
226 #endif
227 }
228 
229 static Thread_Action *_Thread_Get_post_switch_action(
230  Thread_Control *executing
231 )
232 {
233  Chain_Control *chain = &executing->Post_switch_actions.Chain;
234 
235  return (Thread_Action *) _Chain_Get_unprotected( chain );
236 }
237 
238 static void _Thread_Run_post_switch_actions( Thread_Control *executing )
239 {
240  ISR_lock_Context lock_context;
241  Thread_Action *action;
242 
243  _Thread_State_acquire( executing, &lock_context );
244  _Thread_Post_switch_cleanup( executing );
245  action = _Thread_Get_post_switch_action( executing );
246 
247  while ( action != NULL ) {
248  _Chain_Set_off_chain( &action->Node );
249 
250  ( *action->handler )( executing, action, &lock_context );
251 
252  _Thread_State_acquire( executing, &lock_context );
253  action = _Thread_Get_post_switch_action( executing );
254  }
255 
256  _Thread_State_release( executing, &lock_context );
257 }
258 
260 {
261  Thread_Control *executing;
262 
263  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
264 
265 #if defined(RTEMS_SCORE_ROBUST_THREAD_DISPATCH)
266  if (
267  !_ISR_Is_enabled( level )
268 #if defined(RTEMS_SMP) && CPU_ENABLE_ROBUST_THREAD_DISPATCH == FALSE
270 #endif
271  ) {
272  _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_ENVIRONMENT );
273  }
274 #endif
275 
276  executing = cpu_self->executing;
277 
278  do {
279  Thread_Control *heir;
280 
281  level = _Thread_Preemption_intervention( executing, cpu_self, level );
282  heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
283 
284  /*
285  * When the heir and executing are the same, then we are being
286  * requested to do the post switch dispatching. This is normally
287  * done to dispatch signals.
288  */
289  if ( heir == executing )
290  goto post_switch;
291 
292  /*
293  * Since heir and executing are not the same, we need to do a real
294  * context switch.
295  */
296  if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
298 
299  _ISR_Local_enable( level );
300 
301 #if !defined(RTEMS_SMP)
302  _User_extensions_Thread_switch( executing, heir );
303 #endif
304  _Thread_Save_fp( executing );
305  _Context_Switch( &executing->Registers, &heir->Registers );
306  _Thread_Restore_fp( executing );
307 #if defined(RTEMS_SMP)
308  _User_extensions_Thread_switch( NULL, executing );
309 #endif
310 
311  /*
312  * We have to obtain this value again after the context switch since the
313  * heir thread may have migrated from another processor. Values from the
314  * stack or non-volatile registers reflect the old execution environment.
315  */
316  cpu_self = _Per_CPU_Get();
317 
318  _ISR_Local_disable( level );
319  } while ( cpu_self->dispatch_necessary );
320 
321 post_switch:
322  _Assert( cpu_self->thread_dispatch_disable_level == 1 );
323  cpu_self->thread_dispatch_disable_level = 0;
324  _Profiling_Thread_dispatch_enable( cpu_self, 0 );
325 
326  _ISR_Local_enable( level );
327 
328  _Thread_Run_post_switch_actions( executing );
329 }
330 
331 void _Thread_Dispatch( void )
332 {
333  ISR_Level level;
334  Per_CPU_Control *cpu_self;
335 
336  _ISR_Local_disable( level );
337 
338  cpu_self = _Per_CPU_Get();
339 
340  if ( cpu_self->dispatch_necessary ) {
341  _Profiling_Thread_dispatch_disable( cpu_self, 0 );
342  _Assert( cpu_self->thread_dispatch_disable_level == 0 );
343  cpu_self->thread_dispatch_disable_level = 1;
344  _Thread_Do_dispatch( cpu_self, level );
345  } else {
346  _ISR_Local_enable( level );
347  }
348 }
349 
351 {
352  ISR_Level level;
353 
354  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
355  _Internal_error( INTERNAL_ERROR_BAD_THREAD_DISPATCH_DISABLE_LEVEL );
356  }
357 
358  _ISR_Local_disable( level );
359  _Thread_Do_dispatch( cpu_self, level );
360 }
361 
363 {
364  uint32_t disable_level = cpu_self->thread_dispatch_disable_level;
365 
366  if ( disable_level == 1 ) {
367  ISR_Level level;
368 
369  _ISR_Local_disable( level );
370 
371  if (
372  cpu_self->dispatch_necessary
374  || !_ISR_Is_enabled( level )
375 #endif
376  ) {
377  _Thread_Do_dispatch( cpu_self, level );
378  } else {
379  cpu_self->thread_dispatch_disable_level = 0;
380  _Profiling_Thread_dispatch_enable( cpu_self, 0 );
381  _ISR_Local_enable( level );
382  }
383  } else {
384  _Assert( disable_level > 0 );
385  cpu_self->thread_dispatch_disable_level = disable_level - 1;
386  }
387 }
static __inline__ const Scheduler_Control * _Scheduler_Get_by_CPU(const Per_CPU_Control *cpu)
Gets the scheduler for the cpu.
Definition: schedulerimpl.h:99
static __inline__ Chain_Node * _Chain_First(const Chain_Control *the_chain)
Returns pointer to chain&#39;s first node.
Definition: chainimpl.h:260
#define rtems_configuration_get_ticks_per_timeslice()
Returns the clock ticks per timeslice configured for this application.
Definition: config.h:277
static __inline__ void _Scheduler_Acquire_critical(const Scheduler_Control *scheduler, ISR_lock_Context *lock_context)
Acquires the scheduler instance inside a critical section (interrupts disabled).
#define _ISR_Local_disable(_level)
Disables interrupts on this processor.
Definition: isrlevel.h:57
static __inline__ Scheduler_Node * _Thread_Scheduler_get_node_by_index(const Thread_Control *the_thread, size_t scheduler_index)
Gets the thread&#39;s scheduler node by index.
Definition: threadimpl.h:1460
void(* reconsider_help_request)(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node)
Reconsider help operation.
Definition: scheduler.h:115
void _Thread_Dispatch_direct(Per_CPU_Control *cpu_self)
Directly do a thread dispatch.
struct _Thread_Control * executing
This is the thread executing on this processor.
Definition: percpu.h:420
static __inline__ const Scheduler_Control * _Thread_Scheduler_get_home(const Thread_Control *the_thread)
Gets the home scheduler of the thread.
Definition: threadimpl.h:1419
#define RTEMS_PREDICT_FALSE(_exp)
Returns the value of the specified integral expression and tells the compiler that the predicted valu...
Definition: basedefs.h:747
static __inline__ const bool _SMP_Need_inter_processor_interrupts(void)
Indicate if inter-processor interrupts are needed.
Definition: smpimpl.h:333
Data Related to the Management of Processor Interrupt Levels.
Inlined Routines Associated with the Manipulation of the Scheduler.
Constants and Structures Related with Thread Dispatch.
#define _ISR_Is_enabled(_level)
Returns true if interrupts are enabled in the specified interrupt level, otherwise returns false...
Definition: isrlevel.h:115
This header file defines parts of the application configuration information API.
#define CHAIN_DEFINE_EMPTY(name)
Chain definition for an empty chain with designator name.
Definition: chainimpl.h:61
static __inline__ Chain_Node * _Chain_Get_first_unprotected(Chain_Control *the_chain)
Gets the first node (unprotected).
Definition: chainimpl.h:592
static void _Profiling_Thread_dispatch_enable(Per_CPU_Control *cpu, uint32_t new_thread_dispatch_disable_level)
Enables the thread dispatch.
Definition: profiling.h:109
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:764
Chain_Control _User_extensions_Switches_list
List of active task switch extensions.
void _Thread_Do_dispatch(Per_CPU_Control *cpu_self, ISR_Level level)
Performs a thread dispatch on the current processor.
User Extension Handler API.
static __inline__ void _Thread_State_release(Thread_Control *the_thread, ISR_lock_Context *lock_context)
Releases the lock context and enables interrupts.
Definition: threadimpl.h:592
struct Scheduler_Node::@19 Thread
Block to register and manage this scheduler node in the thread control block of the owner of this sch...
Chain_Control Threads_in_need_for_help
Chain of threads in need for help.
Definition: percpu.h:497
static __inline__ void _Thread_Restore_fp(Thread_Control *executing)
Restores the executing thread&#39;s floating point area.
Definition: threadimpl.h:1029
size_t helping_nodes
Count of nodes scheduler nodes minus one.
Definition: thread.h:309
static void _User_extensions_Thread_switch(Thread_Control *executing, Thread_Control *heir)
Switches the thread from the executing to the heir.
Definition: userextimpl.h:366
Information for the Assert Handler.
static __inline__ void _Scheduler_Release_critical(const Scheduler_Control *scheduler, ISR_lock_Context *lock_context)
Releases the scheduler instance inside a critical section (interrupts disabled).
Thread_CPU_budget_algorithms budget_algorithm
Definition: thread.h:814
static __inline__ void _Thread_Save_fp(Thread_Control *executing)
Checks if the floating point context of the thread is currently loaded in the floating point unit...
Definition: threadimpl.h:1014
static __inline__ void _Chain_Prepend_unprotected(Chain_Control *the_chain, Chain_Node *the_node)
Prepends a node (unprotected).
Definition: chainimpl.h:732
int pin_level
The thread pinning to current processor level.
Definition: thread.h:338
Per CPU Core Structure.
Definition: percpu.h:347
uint32_t ISR_Level
Definition: isrlevel.h:41
static __inline__ Chain_Node * _Chain_Next(const Chain_Node *the_node)
Returns pointer to the next node from this node.
Definition: chainimpl.h:327
Thread action.
Definition: thread.h:633
#define _ISR_Local_enable(_level)
Enables interrupts on this processor.
Definition: isrlevel.h:74
#define THREAD_OF_SCHEDULER_HELP_NODE(node)
Definition: threadimpl.h:76
static __inline__ void _Chain_Extract_unprotected(Chain_Node *the_node)
Extracts this node (unprotected).
Definition: chainimpl.h:558
void(* unblock)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:65
static __inline__ void _Scheduler_Block(Thread_Control *the_thread)
Blocks a thread with respect to the scheduler.
void _Thread_Dispatch_enable(Per_CPU_Control *cpu_self)
Enables thread dispatching.
const struct _Scheduler_Control * pinned_scheduler
The pinned scheduler of this thread.
Definition: thread.h:263
Time of Day Handler API.
volatile bool dispatch_necessary
This is set to true when this processor needs to run the thread dispatcher.
Definition: percpu.h:400
Chain_Control Scheduler_nodes
Scheduler nodes immediately available to the schedulers for this thread.
Definition: thread.h:294
#define _Context_Switch(_executing, _heir)
Perform context switch.
Definition: context.h:106
static __inline__ const Scheduler_Control * _Scheduler_Node_get_scheduler(const Scheduler_Node *node)
Gets the scheduler of the node.
void _Thread_Dispatch(void)
Performs a thread dispatch if necessary.
static void _Profiling_Thread_dispatch_disable(Per_CPU_Control *cpu, uint32_t previous_thread_dispatch_disable_level)
Disables the thread dispatch if the previous thread dispatch disable level is zero.
Definition: profiling.h:51
#define RTEMS_SCORE_ROBUST_THREAD_DISPATCH
Enables a robust thread dispatch.
Scheduler_Operations Operations
The scheduler operations.
Definition: scheduler.h:273
Scheduler control.
Definition: scheduler.h:264
Scheduler node for per-thread data.
Definition: schedulernode.h:79
volatile uint32_t thread_dispatch_disable_level
The thread dispatch critical section nesting counter which is used to prevent context switches at ino...
Definition: percpu.h:385
void _Internal_error(Internal_errors_Core_list core_error) RTEMS_NO_RETURN
Terminates the system with an INTERNAL_ERROR_CORE fatal source and the specified core error code...
Definition: interr.c:51
Inlined Routines from the Thread Handler.
void(* pin)(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node, struct Per_CPU_Control *cpu)
Pin thread operation.
Definition: scheduler.h:145
bool(* ask_for_help)(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node)
Ask for help operation.
Definition: scheduler.h:101
#define FALSE
If FALSE is undefined, then FALSE is defined to 0.
Definition: basedefs.h:645
static __inline__ void _Chain_Set_off_chain(Chain_Node *node)
Sets off chain.
Definition: chainimpl.h:104
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:65
static __inline__ bool _Thread_Is_ready(const Thread_Control *the_thread)
Checks if the thread is ready.
Definition: threadimpl.h:401
Context_Control Registers
Definition: thread.h:830
static __inline__ bool _Chain_Is_empty(const Chain_Control *the_chain)
Checks if the chain is empty.
Definition: chainimpl.h:393
static __inline__ void _Thread_State_acquire(Thread_Control *the_thread, ISR_lock_Context *lock_context)
Disables interrupts and acquires the lock_context.
Definition: threadimpl.h:542
union Scheduler_Node::@19::@22 Scheduler_node
Node to add this scheduler node to Thread_Control::Scheduler::Scheduler_nodes or a temporary remove l...
Information Related to the RAM Workspace.
static __inline__ uint32_t _Scheduler_Get_index(const Scheduler_Control *scheduler)
Gets the index of the scheduler.
static __inline__ const Chain_Node * _Chain_Immutable_tail(const Chain_Control *the_chain)
Returns pointer to immutable chain tail.
Definition: chainimpl.h:243
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG.
Definition: assert.h:100
uint32_t cpu_time_budget
Definition: thread.h:809
static __inline__ Thread_Control * _Thread_Get_heir_and_make_it_executing(Per_CPU_Control *cpu_self)
Gets the heir of the processor and makes it executing.
Definition: threadimpl.h:1135
static __inline__ Chain_Node * _Chain_Get_unprotected(Chain_Control *the_chain)
Gets the first node (unprotected).
Definition: chainimpl.h:630