/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 */

/*
 * Copyright (C) 2010 Gedare Bloom.
 * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 * Copyright (c) 2014, 2017 embedded brains GmbH
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Gets the context of the scheduler.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  return scheduler->context;
}

/**
 * @brief Gets the scheduler for the cpu.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
#if defined(RTEMS_SMP)
  return cpu->Scheduler.control;
#else
  (void) cpu;
  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Acquires the scheduler instance inside a critical section
 *   (interrupts disabled).
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Acquire( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

/**
 * @brief Releases the scheduler instance inside a critical section
 *   (interrupts disabled).
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
  const Scheduler_Control *scheduler,
  ISR_lock_Context        *lock_context
)
{
#if defined(RTEMS_SMP)
  Scheduler_Context *context;

  context = _Scheduler_Get_context( scheduler );
  _ISR_lock_Release( &context->Lock, lock_context );
#else
  (void) scheduler;
  (void) lock_context;
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Indicates if the thread non-preempt mode is supported by the
 *   scheduler.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_non_preempt_mode_supported(
  const Scheduler_Control *scheduler
)
{
  return scheduler->is_non_preempt_mode_supported;
}
#endif

#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask for help request if necessary.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system. Then remote Schedulers may be accessible. How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  ( *scheduler->Operations.schedule )( scheduler, the_thread );

  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Scheduler yield with a particular thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.yield )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

#if defined(RTEMS_SMP)
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
    _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
  );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
#else
  scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  scheduler = _Thread_Scheduler_get_home( the_thread );
#endif

  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
  _Scheduler_Release_critical( scheduler, &lock_context );
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node       *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node          *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context         lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates
 *   a priority change of a thread to the schedulers.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif

/**
 * @brief Maps a thread priority from the user domain to the scheduler
 *   domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.map_priority )( scheduler, priority );
}

/**
 * @brief Unmaps a thread priority from the scheduler domain to the user
 *   domain.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(
  const Scheduler_Control *scheduler,
  Priority_Control         priority
)
{
  return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
}

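/*
 * Usage sketch (not part of the API): a service that stores priorities in
 * scheduler nodes would map a user-domain priority into the scheduler
 * domain first and unmap it again before reporting it back to the user.
 * The priority value 10 is an arbitrary example.
 *
 *   Priority_Control internal;
 *
 *   internal = _Scheduler_Map_priority( scheduler, 10 );
 *   ... store internal in a scheduler node ...
 *   ... later: _Scheduler_Unmap_priority( scheduler, internal ) ...
 *
 * For schedulers where the two operations are inverses of each other, the
 * round trip yields the original value again.
 */
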
/**
 * @brief Initializes a scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  ( *scheduler->Operations.node_initialize )(
    scheduler,
    node,
    the_thread,
    priority
  );
}

/**
 * @brief Destroys a scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Scheduler_Node          *node
)
{
  ( *scheduler->Operations.node_destroy )( scheduler, node );
}

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  uint64_t              deadline,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.release_job )(
    scheduler,
    the_thread,
    priority_node,
    deadline,
    queue_context
  );
}

/**
 * @brief Cancels a job of a thread with respect to the scheduler.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(
  Thread_Control       *the_thread,
  Priority_Node        *priority_node,
  Thread_queue_Context *queue_context
)
{
  const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  ( *scheduler->Operations.cancel_job )(
    scheduler,
    the_thread,
    priority_node,
    queue_context
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( const Per_CPU_Control *cpu )
{
  const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
  Thread_Control *executing = cpu->executing;

  if ( scheduler != NULL && executing != NULL ) {
    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

/**
 * @brief Checks if the scheduler of the cpu with the given index is equal
 *   to the given scheduler.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t                 cpu_index
)
{
#if defined(RTEMS_SMP)
  const Per_CPU_Control   *cpu;
  const Scheduler_Control *scheduler_of_cpu;

  cpu = _Per_CPU_Get_by_index( cpu_index );
  scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );

  return scheduler_of_cpu == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

/**
 * @brief Gets the processors of the scheduler.
 */
RTEMS_INLINE_ROUTINE const Processor_mask *_Scheduler_Get_processors(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return &_Scheduler_Get_context( scheduler )->Processors;
#else
  return &_Processor_mask_The_one_and_only;
#endif
}

/**
 * @brief Copies the thread's scheduler's affinity to the given cpuset.
 */
bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

/**
 * @brief Checks if the affinity is a subset of the online processors.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  const Processor_mask    *affinity
)
{
  (void) scheduler;
  (void) the_thread;
  (void) node;
  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
}

/**
 * @brief Sets the thread's scheduler's affinity.
 */
bool _Scheduler_Set_affinity(
  Thread_Control  *the_thread,
  size_t           cpusetsize,
  const cpu_set_t *cpuset
);

/**
 * @brief Blocks the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  void                  ( *extract )(
    const Scheduler_Control *,
    Thread_Control *,
    Scheduler_Node *
  ),
  void                  ( *schedule )(
    const Scheduler_Control *,
    Thread_Control *,
    bool
  )
)
{
  ( *extract )( scheduler, the_thread, node );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

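/*
 * Usage sketch (hypothetical names): a scheduler implementation may
 * delegate its block operation to _Scheduler_Generic_block() by supplying
 * its own extract and schedule bodies, for example:
 *
 *   void _My_scheduler_Block(
 *     const Scheduler_Control *scheduler,
 *     Thread_Control          *the_thread,
 *     Scheduler_Node          *node
 *   )
 *   {
 *     _Scheduler_Generic_block(
 *       scheduler,
 *       the_thread,
 *       node,
 *       _My_scheduler_Extract,
 *       _My_scheduler_Schedule_body
 *     );
 *   }
 */
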
/**
 * @brief Gets the number of processors of the scheduler.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Context *context = _Scheduler_Get_context( scheduler );

  return _Processor_mask_Count( &context->Processors );
#else
  (void) scheduler;

  return 1;
#endif
}

/**
 * @brief Builds an object build id.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    (uint16_t) ( scheduler_index + 1 )
  );
}

/**
 * @brief Gets the scheduler index from the given object build id.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  uint32_t minimum_id = _Scheduler_Build_id( 0 );

  return id - minimum_id;
}

/**
 * @brief Gets the scheduler from the given object build id.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_id(
  Objects_Id id
)
{
  uint32_t index;

  index = _Scheduler_Get_index_by_id( id );

  if ( index >= _Scheduler_Count ) {
    return NULL;
  }

  return &_Scheduler_Table[ index ];
}

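/*
 * Usage sketch: scheduler object identifiers and scheduler table indices
 * round-trip through the helpers above, e.g. for the first configured
 * scheduler:
 *
 *   Objects_Id id;
 *
 *   id = _Scheduler_Build_id( 0 );
 *   _Assert( _Scheduler_Get_index_by_id( id ) == 0 );
 *   _Assert( _Scheduler_Get_by_id( id ) == &_Scheduler_Table[ 0 ] );
 */
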
/**
 * @brief Gets the index of the scheduler.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) ( scheduler - &_Scheduler_Table[ 0 ] );
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

/**
 * @brief Changes the thread's state to the given new state.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
      || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
      || !_System_state_Is_up( _System_state_Get() )
  );

  the_thread->Scheduler.state = new_state;
}

/**
 * @brief Sets the scheduler node's idle thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
  Scheduler_Node *node,
  Thread_Control *idle
)
{
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;
}

/**
 * @brief Uses an idle thread for this scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Per_CPU_Control           *cpu,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  _Scheduler_Set_idle_thread( node, idle );
  _Thread_Set_CPU( idle, cpu );
  return idle;
}

/**
 * @brief This enumeration defines what a scheduler should do with a node
 *   which could be scheduled.
 */
typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;

/**
 * @brief Tries to schedule the scheduler node.
 */
RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
_Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  const Thread_Control      *idle,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  ISR_lock_Context                  lock_context;
  Scheduler_Try_to_schedule_action  action;
  Thread_Control                   *owner;

  action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_user( node ) == owner );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );

  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
    _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
  } else if (
    owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && node->sticky_level <= 1
  ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( node->sticky_level == 0 ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
  } else if ( idle != NULL ) {
    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
  } else {
    _Scheduler_Use_idle_thread(
      context,
      node,
      _Thread_Get_CPU( owner ),
      get_idle_thread
    );
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );
  return action;
}

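/*
 * Usage sketch: an SMP scheduler implementation is expected to act on the
 * returned value roughly along these lines (the handler bodies are
 * scheduler-specific and only indicated here):
 *
 *   action = _Scheduler_Try_to_schedule_node(
 *     context,
 *     node,
 *     idle,
 *     get_idle_thread
 *   );
 *
 *   switch ( action ) {
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE:
 *       ... place the node in the set of scheduled nodes ...
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE:
 *       ... see _Scheduler_Exchange_idle_thread() below ...
 *       break;
 *     case SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK:
 *       ... extract the node and pick another candidate ...
 *       break;
 *   }
 */
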
/**
 * @brief Releases an idle thread using this scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );

  if ( idle != NULL ) {
    Thread_Control *owner = _Scheduler_Node_get_owner( node );

    node->idle = NULL;
    _Scheduler_Node_set_user( node, owner );
    ( *release_idle_thread )( context, idle );
  }

  return idle;
}

/**
 * @brief Exchanges an idle thread from the scheduler node that uses it
 *   right now to another scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
  Scheduler_Node *needs_idle,
  Scheduler_Node *uses_idle,
  Thread_Control *idle
)
{
  uses_idle->idle = NULL;
  _Scheduler_Node_set_user(
    uses_idle,
    _Scheduler_Node_get_owner( uses_idle )
  );
  _Scheduler_Set_idle_thread( needs_idle, idle );
}

/**
 * @brief Blocks this scheduler node.
 */
RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
  Scheduler_Context         *context,
  Thread_Control            *thread,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  int               sticky_level;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *thread_cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  thread_cpu = _Thread_Get_CPU( thread );
  _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  if ( sticky_level > 0 ) {
    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread(
        context,
        node,
        thread_cpu,
        get_idle_thread
      );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
    }

    return NULL;
  }

  _Assert( thread == _Scheduler_Node_get_user( node ) );
  return thread_cpu;
}

/**
 * @brief Discards the idle thread from the scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control  *idle;
  Thread_Control  *owner;
  Per_CPU_Control *cpu;

  idle = _Scheduler_Node_get_idle( node );
  owner = _Scheduler_Node_get_owner( node );

  node->idle = NULL;
  _Assert( _Scheduler_Node_get_user( node ) == idle );
  _Scheduler_Node_set_user( node, owner );
  ( *release_idle_thread )( context, idle );

  cpu = _Thread_Get_CPU( idle );
  _Thread_Set_CPU( the_thread, cpu );
  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
}

/**
 * @brief Unblocks this scheduler node.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  if ( is_scheduled ) {
    _Scheduler_Discard_idle_thread(
      context,
      the_thread,
      node,
      release_idle_thread
    );
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
    unblock = true;
  }

  return unblock;
}
#endif

/**
 * @brief Updates the heir.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *new_heir,
  bool            force_dispatch
)
{
  Thread_Control *heir = _Thread_Heir;

  if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
#if defined(RTEMS_SMP)
    /*
     * We need this state only for _Thread_Get_CPU_time_used(). Cannot use
     * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
     * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
     * schedulers.
     */
    heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
    new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
#endif
    _Thread_Update_CPU_time_used( heir, _Thread_Get_CPU( heir ) );
    _Thread_Heir = new_heir;
    _Thread_Dispatch_necessary = true;
  }
}

/**
 * @brief Sets a new scheduler.
 */
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
  const Scheduler_Control *new_scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Scheduler_Node          *new_scheduler_node;
  Scheduler_Node          *old_scheduler_node;
#if defined(RTEMS_SMP)
  ISR_lock_Context         lock_context;
  const Scheduler_Control *old_scheduler;
#endif

  if ( the_thread->Wait.queue != NULL ) {
    return STATUS_RESOURCE_IN_USE;
  }

  old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
  _Priority_Plain_extract(
    &old_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

  if (
    !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
#if defined(RTEMS_SMP)
      || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
      || the_thread->Scheduler.pin_level != 0
#endif
  ) {
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_RESOURCE_IN_USE;
  }

#if defined(RTEMS_SMP)
  old_scheduler = _Thread_Scheduler_get_home( the_thread );
  new_scheduler_node = _Thread_Scheduler_get_node_by_index(
    the_thread,
    _Scheduler_Get_index( new_scheduler )
  );

  _Scheduler_Acquire_critical( new_scheduler, &lock_context );

  if (
    _Scheduler_Get_processor_count( new_scheduler ) == 0
      || !( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
      )
  ) {
    _Scheduler_Release_critical( new_scheduler, &lock_context );
    _Priority_Plain_insert(
      &old_scheduler_node->Wait.Priority,
      &the_thread->Real_priority,
      the_thread->Real_priority.priority
    );
    return STATUS_UNSATISFIED;
  }

  _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
  the_thread->Scheduler.home_scheduler = new_scheduler;

  _Scheduler_Release_critical( new_scheduler, &lock_context );

  _Thread_Scheduler_process_requests( the_thread );
#else
  new_scheduler_node = old_scheduler_node;
#endif

  the_thread->Start.initial_priority = priority;
  _Priority_Node_set_priority( &the_thread->Real_priority, priority );
  _Priority_Initialize_one(
    &new_scheduler_node->Wait.Priority,
    &the_thread->Real_priority
  );

#if defined(RTEMS_SMP)
  if ( old_scheduler != new_scheduler ) {
    States_Control current_state;

    current_state = the_thread->current_state;

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Block( the_thread );
    }

    _Assert( old_scheduler_node->sticky_level == 0 );
    _Assert( new_scheduler_node->sticky_level == 0 );

    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Wait_nodes,
      &new_scheduler_node->Thread.Wait_node
    );
    _Chain_Extract_unprotected(
      &old_scheduler_node->Thread.Scheduler_node.Chain
    );
    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &the_thread->Scheduler.Scheduler_nodes,
      &new_scheduler_node->Thread.Scheduler_node.Chain
    );

    _Scheduler_Node_set_priority( new_scheduler_node, priority, false );

    if ( _States_Is_ready( current_state ) ) {
      _Scheduler_Unblock( the_thread );
    }

    return STATUS_SUCCESSFUL;
  }
#endif

  _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
  _Scheduler_Update_priority( the_thread );
  return STATUS_SUCCESSFUL;
}

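/*
 * Usage sketch: directive implementations along the lines of
 * rtems_task_set_scheduler() are expected to call _Scheduler_Set() with the
 * thread state protection held and to translate the returned Status_Control
 * into their API-specific status code.
 */
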
#ifdef __cplusplus
}
#endif

#endif
/* end of include file */
Sets the user of the node.