RTEMS
schedulersmpimpl.h
Go to the documentation of this file.
1 
9 /*
10  * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
11  *
12  * embedded brains GmbH
13  * Dornierstr. 4
14  * 82178 Puchheim
15  * Germany
16  * <rtems@embedded-brains.de>
17  *
18  * The license and distribution terms for this file may be
19  * found in the file LICENSE in this distribution or at
20  * http://www.rtems.org/license/LICENSE.
21  */
22 
23 #ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
24 #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
25 
27 #include <rtems/score/assert.h>
28 #include <rtems/score/chainimpl.h>
30 #include <rtems/bspIo.h>
31 
32 #ifdef __cplusplus
33 extern "C" {
34 #endif /* __cplusplus */
35 
278 typedef bool ( *Scheduler_SMP_Has_ready )(
279  Scheduler_Context *context
280 );
281 
282 typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
283  Scheduler_Context *context,
284  Scheduler_Node *node
285 );
286 
287 typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
288  Scheduler_Context *context,
289  Scheduler_Node *filter
290 );
291 
292 typedef void ( *Scheduler_SMP_Extract )(
293  Scheduler_Context *context,
294  Scheduler_Node *node_to_extract
295 );
296 
297 typedef void ( *Scheduler_SMP_Insert )(
298  Scheduler_Context *context,
299  Scheduler_Node *node_to_insert,
300  Priority_Control insert_priority
301 );
302 
303 typedef void ( *Scheduler_SMP_Move )(
304  Scheduler_Context *context,
305  Scheduler_Node *node_to_move
306 );
307 
308 typedef bool ( *Scheduler_SMP_Ask_for_help )(
309  Scheduler_Context *context,
310  Thread_Control *thread,
311  Scheduler_Node *node
312 );
313 
314 typedef void ( *Scheduler_SMP_Update )(
315  Scheduler_Context *context,
316  Scheduler_Node *node_to_update,
317  Priority_Control new_priority
318 );
319 
320 typedef void ( *Scheduler_SMP_Set_affinity )(
321  Scheduler_Context *context,
322  Scheduler_Node *node,
323  void *arg
324 );
325 
326 typedef bool ( *Scheduler_SMP_Enqueue )(
327  Scheduler_Context *context,
328  Scheduler_Node *node_to_enqueue,
329  Priority_Control priority
330 );
331 
332 typedef void ( *Scheduler_SMP_Allocate_processor )(
333  Scheduler_Context *context,
334  Scheduler_Node *scheduled,
335  Scheduler_Node *victim,
336  Per_CPU_Control *victim_cpu
337 );
338 
339 typedef void ( *Scheduler_SMP_Register_idle )(
340  Scheduler_Context *context,
341  Scheduler_Node *idle,
342  Per_CPU_Control *cpu
343 );
344 
353  Scheduler_Context *context,
354  Scheduler_Node *idle,
355  Per_CPU_Control *cpu
356 )
357 {
358  (void) context;
359  (void) idle;
360  (void) cpu;
361 }
362 
373  const void *to_insert,
374  const Chain_Node *next
375 )
376 {
377  const Priority_Control *priority_to_insert;
378  const Scheduler_SMP_Node *node_next;
379 
380  priority_to_insert = (const Priority_Control *) to_insert;
381  node_next = (const Scheduler_SMP_Node *) next;
382 
383  return *priority_to_insert <= node_next->priority;
384 }
385 
394  Scheduler_Context *context
395 )
396 {
397  return (Scheduler_SMP_Context *) context;
398 }
399 
405 static inline void _Scheduler_SMP_Initialize(
407 )
408 {
409  _Chain_Initialize_empty( &self->Scheduled );
410  _Chain_Initialize_empty( &self->Idle_threads );
411 }
412 
421  Thread_Control *thread
422 )
423 {
425 }
426 
435  Thread_Control *thread
436 )
437 {
439 }
440 
449  Scheduler_Node *node
450 )
451 {
452  return (Scheduler_SMP_Node *) node;
453 }
454 
463  const Scheduler_Node *node
464 )
465 {
466  return ( (const Scheduler_SMP_Node *) node )->state;
467 }
468 
477  const Scheduler_Node *node
478 )
479 {
480  return ( (const Scheduler_SMP_Node *) node )->priority;
481 }
482 
492  const Scheduler_Control *scheduler,
493  Scheduler_SMP_Node *node,
494  Thread_Control *thread,
495  Priority_Control priority
496 )
497 {
498  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
500  node->priority = priority;
501 }
502 
510  Scheduler_SMP_Node *node,
511  Priority_Control new_priority
512 )
513 {
514  node->priority = new_priority;
515 }
516 
524  Scheduler_Node *node,
525  Scheduler_SMP_Node_state new_state
526 )
527 {
528  Scheduler_SMP_Node *the_node;
529 
530  the_node = _Scheduler_SMP_Node_downcast( node );
531  the_node->state = new_state;
532 }
533 
544  const Scheduler_Context *context,
545  const Per_CPU_Control *cpu
546 )
547 {
548  return cpu->Scheduler.context == context;
549 }
550 
559  Scheduler_Context *context
560 )
561 {
563  Thread_Control *idle = (Thread_Control *)
564  _Chain_Get_first_unprotected( &self->Idle_threads );
565 
566  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );
567 
568  return idle;
569 }
570 
578  Scheduler_Context *context,
579  Thread_Control *idle
580 )
581 {
583 
584  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
585 }
586 
593  Thread_Control *idle
594 )
595 {
597 }
598 
610  Scheduler_Context *context,
611  Scheduler_Node *scheduled,
612  Scheduler_Node *victim,
613  Per_CPU_Control *victim_cpu
614 )
615 {
616  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
617  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
618  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
619  Per_CPU_Control *cpu_self = _Per_CPU_Get();
620  Thread_Control *heir;
621 
622  _Assert( _ISR_Get_level() != 0 );
623 
624  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
625  if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
626  heir = scheduled_cpu->heir;
628  cpu_self,
629  scheduled_cpu,
630  scheduled_thread
631  );
632  } else {
633  /* We have to force a migration to our processor set */
634  heir = scheduled_thread;
635  }
636  } else {
637  heir = scheduled_thread;
638  }
639 
640  if ( heir != victim_thread ) {
641  _Thread_Set_CPU( heir, victim_cpu );
642  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
643  }
644 }
645 
660  Scheduler_Context *context,
661  Scheduler_Node *scheduled,
662  Scheduler_Node *victim,
663  Per_CPU_Control *victim_cpu
664 )
665 {
666  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
667  Per_CPU_Control *cpu_self = _Per_CPU_Get();
668 
669  (void) context;
670  (void) victim;
671 
672  _Thread_Set_CPU( scheduled_thread, victim_cpu );
673  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
674 }
675 
686  Scheduler_Context *context,
687  Scheduler_Node *scheduled,
688  Scheduler_Node *victim,
689  Per_CPU_Control *victim_cpu,
690  Scheduler_SMP_Allocate_processor allocate_processor
691 )
692 {
694  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
695 }
696 
708  Scheduler_Context *context,
709  Scheduler_Node *scheduled,
710  Scheduler_Node *victim,
711  Scheduler_SMP_Allocate_processor allocate_processor
712 )
713 {
714  Thread_Control *victim_thread;
715  ISR_lock_Context scheduler_lock_context;
716  Per_CPU_Control *victim_cpu;
717 
718  victim_thread = _Scheduler_Node_get_user( victim );
720 
721  _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
722 
723  victim_cpu = _Thread_Get_CPU( victim_thread );
724 
725  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
727 
728  if ( victim_thread->Scheduler.helping_nodes > 0 ) {
729  ISR_lock_Context per_cpu_lock_context;
730 
731  _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
733  &victim_cpu->Threads_in_need_for_help,
734  &victim_thread->Scheduler.Help_node
735  );
736  _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
737  }
738  }
739 
740  _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
741 
743  context,
744  scheduled,
745  victim,
746  victim_cpu,
747  allocate_processor
748  );
749 
750  return victim_thread;
751 }
752 
762  Scheduler_Context *context,
763  Scheduler_Node *filter
764 )
765 {
767  Chain_Control *scheduled = &self->Scheduled;
768  Scheduler_Node *lowest_scheduled =
769  (Scheduler_Node *) _Chain_Last( scheduled );
770 
771  (void) filter;
772 
773  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
774  _Assert(
775  _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
776  );
777 
778  return lowest_scheduled;
779 }
780 
798  Scheduler_Context *context,
799  Scheduler_Node *node,
800  Priority_Control priority,
801  Scheduler_Node *lowest_scheduled,
802  Scheduler_SMP_Insert insert_scheduled,
803  Scheduler_SMP_Move move_from_scheduled_to_ready,
804  Scheduler_SMP_Allocate_processor allocate_processor
805 )
806 {
808 
810  context,
811  node,
812  _Scheduler_Node_get_idle( lowest_scheduled ),
814  );
815 
816  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
818  context,
819  node,
820  lowest_scheduled,
821  allocate_processor
822  );
823 
824  ( *insert_scheduled )( context, node, priority );
825  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
826 
828  context,
829  lowest_scheduled,
831  );
832  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
834  lowest_scheduled,
836  );
838 
839  ( *insert_scheduled )( context, node, priority );
840  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
841 
843  node,
844  lowest_scheduled,
845  _Scheduler_Node_get_idle( lowest_scheduled )
846  );
847  } else {
848  _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
850  }
851 }
852 
875 static inline bool _Scheduler_SMP_Enqueue(
876  Scheduler_Context *context,
877  Scheduler_Node *node,
878  Priority_Control insert_priority,
879  Chain_Node_order order,
880  Scheduler_SMP_Insert insert_ready,
881  Scheduler_SMP_Insert insert_scheduled,
882  Scheduler_SMP_Move move_from_scheduled_to_ready,
883  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
884  Scheduler_SMP_Allocate_processor allocate_processor
885 )
886 {
887  bool needs_help;
888  Scheduler_Node *lowest_scheduled;
889 
890  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
891 
892  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
894  context,
895  node,
896  insert_priority,
897  lowest_scheduled,
898  insert_scheduled,
899  move_from_scheduled_to_ready,
900  allocate_processor
901  );
902  needs_help = false;
903  } else {
904  ( *insert_ready )( context, node, insert_priority );
905  needs_help = true;
906  }
907 
908  return needs_help;
909 }
910 
931  Scheduler_Context *context,
932  Scheduler_Node *const node,
933  Priority_Control insert_priority,
934  Chain_Node_order order,
935  Scheduler_SMP_Extract extract_from_ready,
936  Scheduler_SMP_Get_highest_ready get_highest_ready,
937  Scheduler_SMP_Insert insert_ready,
938  Scheduler_SMP_Insert insert_scheduled,
939  Scheduler_SMP_Move move_from_ready_to_scheduled,
940  Scheduler_SMP_Allocate_processor allocate_processor
941 )
942 {
943  while ( true ) {
944  Scheduler_Node *highest_ready;
946 
947  highest_ready = ( *get_highest_ready )( context, node );
948 
949  /*
950  * The node has been extracted from the scheduled chain. We have to place
951  * it now on the scheduled or ready set.
952  */
953  if (
954  node->sticky_level > 0
955  && ( *order )( &insert_priority, &highest_ready->Node.Chain )
956  ) {
957  ( *insert_scheduled )( context, node, insert_priority );
958 
959  if ( _Scheduler_Node_get_idle( node ) != NULL ) {
960  Thread_Control *owner;
961  ISR_lock_Context lock_context;
962 
963  owner = _Scheduler_Node_get_owner( node );
964  _Thread_Scheduler_acquire_critical( owner, &lock_context );
965 
966  if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
968  owner,
969  _Thread_Get_CPU( owner )
970  );
972  context,
973  owner,
974  node,
976  );
978  }
979 
980  _Thread_Scheduler_release_critical( owner, &lock_context );
981  }
982 
983  return false;
984  }
985 
987  context,
988  highest_ready,
989  _Scheduler_Node_get_idle( node ),
991  );
992 
993  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
994  Thread_Control *idle;
995 
997  context,
998  highest_ready,
999  node,
1000  allocate_processor
1001  );
1002 
1003  ( *insert_ready )( context, node, insert_priority );
1004  ( *move_from_ready_to_scheduled )( context, highest_ready );
1005 
1007  context,
1008  node,
1010  );
1011  return ( idle == NULL );
1012  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
1015  highest_ready,
1017  );
1018 
1019  ( *insert_ready )( context, node, insert_priority );
1020  ( *move_from_ready_to_scheduled )( context, highest_ready );
1021 
1023  highest_ready,
1024  node,
1025  _Scheduler_Node_get_idle( node )
1026  );
1027  return false;
1028  } else {
1029  _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1030 
1032  highest_ready,
1034  );
1035 
1036  ( *extract_from_ready )( context, highest_ready );
1037  }
1038  }
1039 }
1040 
1048  Scheduler_Context *context,
1049  Scheduler_Node *node
1050 )
1051 {
1052  (void) context;
1054 }
1055 
1071  Scheduler_Context *context,
1072  Scheduler_Node *victim,
1073  Per_CPU_Control *victim_cpu,
1074  Scheduler_SMP_Extract extract_from_ready,
1075  Scheduler_SMP_Get_highest_ready get_highest_ready,
1076  Scheduler_SMP_Move move_from_ready_to_scheduled,
1077  Scheduler_SMP_Allocate_processor allocate_processor
1078 )
1079 {
1081 
1082  do {
1083  Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
1084 
1086  context,
1087  highest_ready,
1088  NULL,
1090  );
1091 
1092  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
1094  context,
1095  highest_ready,
1096  victim,
1097  victim_cpu,
1098  allocate_processor
1099  );
1100 
1101  ( *move_from_ready_to_scheduled )( context, highest_ready );
1102  } else {
1103  _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1104 
1106  highest_ready,
1108  );
1109 
1110  ( *extract_from_ready )( context, highest_ready );
1111  }
1112  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1113 }
1114 
1130  Scheduler_Context *context,
1131  Scheduler_Node *victim,
1132  Per_CPU_Control *victim_cpu,
1133  Scheduler_SMP_Extract extract_from_ready,
1134  Scheduler_SMP_Get_highest_ready get_highest_ready,
1135  Scheduler_SMP_Move move_from_ready_to_scheduled,
1136  Scheduler_SMP_Allocate_processor allocate_processor
1137 )
1138 {
1140 
1141  do {
1142  Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );
1143 
1145  context,
1146  highest_ready,
1147  NULL,
1149  );
1150 
1151  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
1153  context,
1154  highest_ready,
1155  victim,
1156  allocate_processor
1157  );
1158 
1159  ( *move_from_ready_to_scheduled )( context, highest_ready );
1160  } else {
1161  _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1162 
1164  highest_ready,
1166  );
1167 
1168  ( *extract_from_ready )( context, highest_ready );
1169  }
1170  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1171 }
1172 
1189 static inline void _Scheduler_SMP_Block(
1190  Scheduler_Context *context,
1191  Thread_Control *thread,
1192  Scheduler_Node *node,
1193  Scheduler_SMP_Extract extract_from_scheduled,
1194  Scheduler_SMP_Extract extract_from_ready,
1195  Scheduler_SMP_Get_highest_ready get_highest_ready,
1196  Scheduler_SMP_Move move_from_ready_to_scheduled,
1197  Scheduler_SMP_Allocate_processor allocate_processor
1198 )
1199 {
1200  Scheduler_SMP_Node_state node_state;
1201  Per_CPU_Control *thread_cpu;
1202 
1203  node_state = _Scheduler_SMP_Node_state( node );
1204 
1205  thread_cpu = _Scheduler_Block_node(
1206  context,
1207  thread,
1208  node,
1209  node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1211  );
1212 
1213  if ( thread_cpu != NULL ) {
1215 
1216  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1217  ( *extract_from_scheduled )( context, node );
1219  context,
1220  node,
1221  thread_cpu,
1222  extract_from_ready,
1223  get_highest_ready,
1224  move_from_ready_to_scheduled,
1225  allocate_processor
1226  );
1227  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1228  ( *extract_from_ready )( context, node );
1229  }
1230  }
1231 }
1232 
1243 static inline void _Scheduler_SMP_Unblock(
1244  Scheduler_Context *context,
1245  Thread_Control *thread,
1246  Scheduler_Node *node,
1247  Scheduler_SMP_Update update,
1248  Scheduler_SMP_Enqueue enqueue
1249 )
1250 {
1251  Scheduler_SMP_Node_state node_state;
1252  bool unblock;
1253 
1254  node_state = _Scheduler_SMP_Node_state( node );
1255  unblock = _Scheduler_Unblock_node(
1256  context,
1257  thread,
1258  node,
1259  node_state == SCHEDULER_SMP_NODE_SCHEDULED,
1261  );
1262 
1263  if ( unblock ) {
1264  Priority_Control priority;
1265  bool needs_help;
1266 
1267  priority = _Scheduler_Node_get_priority( node );
1268  priority = SCHEDULER_PRIORITY_PURIFY( priority );
1269 
1270  if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
1271  ( *update )( context, node, priority );
1272  }
1273 
1274  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1275  Priority_Control insert_priority;
1276 
1278  insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
1279  needs_help = ( *enqueue )( context, node, insert_priority );
1280  } else {
1281  _Assert( node_state == SCHEDULER_SMP_NODE_READY );
1282  _Assert( node->sticky_level > 0 );
1283  _Assert( node->idle == NULL );
1284  needs_help = true;
1285  }
1286 
1287  if ( needs_help ) {
1288  _Scheduler_Ask_for_help( thread );
1289  }
1290  }
1291 }
1292 
1313  Scheduler_Context *context,
1314  Thread_Control *thread,
1315  Scheduler_Node *node,
1316  Scheduler_SMP_Extract extract_from_ready,
1317  Scheduler_SMP_Update update,
1318  Scheduler_SMP_Enqueue enqueue,
1319  Scheduler_SMP_Enqueue enqueue_scheduled,
1320  Scheduler_SMP_Ask_for_help ask_for_help
1321 )
1322 {
1323  Priority_Control priority;
1324  Priority_Control insert_priority;
1325  Scheduler_SMP_Node_state node_state;
1326 
1327  insert_priority = _Scheduler_Node_get_priority( node );
1328  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
1329 
1330  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
1331  if ( _Thread_Is_ready( thread ) ) {
1332  ( *ask_for_help )( context, thread, node );
1333  }
1334 
1335  return;
1336  }
1337 
1338  node_state = _Scheduler_SMP_Node_state( node );
1339 
1340  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1341  _Scheduler_SMP_Extract_from_scheduled( context, node );
1342  ( *update )( context, node, priority );
1343  ( *enqueue_scheduled )( context, node, insert_priority );
1344  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1345  ( *extract_from_ready )( context, node );
1346  ( *update )( context, node, priority );
1347  ( *enqueue )( context, node, insert_priority );
1348  } else {
1349  ( *update )( context, node, priority );
1350 
1351  if ( _Thread_Is_ready( thread ) ) {
1352  ( *ask_for_help )( context, thread, node );
1353  }
1354  }
1355 }
1356 
1368 static inline void _Scheduler_SMP_Yield(
1369  Scheduler_Context *context,
1370  Thread_Control *thread,
1371  Scheduler_Node *node,
1372  Scheduler_SMP_Extract extract_from_ready,
1373  Scheduler_SMP_Enqueue enqueue,
1374  Scheduler_SMP_Enqueue enqueue_scheduled
1375 )
1376 {
1377  bool needs_help;
1378  Scheduler_SMP_Node_state node_state;
1379  Priority_Control insert_priority;
1380 
1381  node_state = _Scheduler_SMP_Node_state( node );
1382  insert_priority = _Scheduler_SMP_Node_priority( node );
1383  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1384 
1385  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1386  _Scheduler_SMP_Extract_from_scheduled( context, node );
1387  ( *enqueue_scheduled )( context, node, insert_priority );
1388  needs_help = false;
1389  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1390  ( *extract_from_ready )( context, node );
1391 
1392  needs_help = ( *enqueue )( context, node, insert_priority );
1393  } else {
1394  needs_help = true;
1395  }
1396 
1397  if ( needs_help ) {
1398  _Scheduler_Ask_for_help( thread );
1399  }
1400 }
1401 
1410  Scheduler_Context *context,
1411  Scheduler_Node *node_to_insert,
1412  Priority_Control priority_to_insert
1413 )
1414 {
1415  Scheduler_SMP_Context *self;
1416 
1417  self = _Scheduler_SMP_Get_self( context );
1418 
1420  &self->Scheduled,
1421  &node_to_insert->Node.Chain,
1422  &priority_to_insert,
1424  );
1425 }
1426 
1449 static inline bool _Scheduler_SMP_Ask_for_help(
1450  Scheduler_Context *context,
1451  Thread_Control *thread,
1452  Scheduler_Node *node,
1453  Chain_Node_order order,
1454  Scheduler_SMP_Insert insert_ready,
1455  Scheduler_SMP_Insert insert_scheduled,
1456  Scheduler_SMP_Move move_from_scheduled_to_ready,
1457  Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
1458  Scheduler_SMP_Allocate_processor allocate_processor
1459 )
1460 {
1461  Scheduler_Node *lowest_scheduled;
1462  ISR_lock_Context lock_context;
1463  bool success;
1464 
1465  if ( thread->Scheduler.pinned_scheduler != NULL ) {
1466  /*
1467  * Pinned threads are not allowed to ask for help. Return success to break
1468  * the loop in _Thread_Ask_for_help() early.
1469  */
1470  return true;
1471  }
1472 
1473  lowest_scheduled = ( *get_lowest_scheduled )( context, node );
1474 
1475  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1476 
1477  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
1478  Scheduler_SMP_Node_state node_state;
1479 
1480  node_state = _Scheduler_SMP_Node_state( node );
1481 
1482  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
1483  Priority_Control insert_priority;
1484 
1485  insert_priority = _Scheduler_SMP_Node_priority( node );
1486 
1487  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
1489  thread,
1490  _Thread_Get_CPU( thread )
1491  );
1493  _Thread_Scheduler_release_critical( thread, &lock_context );
1494 
1496  context,
1497  node,
1498  lowest_scheduled,
1499  allocate_processor
1500  );
1501 
1502  ( *insert_scheduled )( context, node, insert_priority );
1503  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
1504 
1506  context,
1507  lowest_scheduled,
1509  );
1510  success = true;
1511  } else {
1512  _Thread_Scheduler_release_critical( thread, &lock_context );
1514  ( *insert_ready )( context, node, insert_priority );
1515  success = false;
1516  }
1517  } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1519  thread,
1520  _Thread_Get_CPU( thread )
1521  );
1523  context,
1524  thread,
1525  node,
1527  );
1529  _Thread_Scheduler_release_critical( thread, &lock_context );
1530  success = true;
1531  } else {
1532  _Thread_Scheduler_release_critical( thread, &lock_context );
1533  success = false;
1534  }
1535  } else {
1536  _Thread_Scheduler_release_critical( thread, &lock_context );
1537  success = false;
1538  }
1539 
1540  return success;
1541 }
1542 
1553  Scheduler_Context *context,
1554  Thread_Control *thread,
1555  Scheduler_Node *node,
1556  Scheduler_SMP_Extract extract_from_ready
1557 )
1558 {
1559  ISR_lock_Context lock_context;
1560 
1561  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1562 
1563  if (
1566  && node->sticky_level == 1
1567  ) {
1569  ( *extract_from_ready )( context, node );
1570  }
1571 
1572  _Thread_Scheduler_release_critical( thread, &lock_context );
1573 }
1574 
1590 static inline void _Scheduler_SMP_Withdraw_node(
1591  Scheduler_Context *context,
1592  Thread_Control *thread,
1593  Scheduler_Node *node,
1594  Thread_Scheduler_state next_state,
1595  Scheduler_SMP_Extract extract_from_ready,
1596  Scheduler_SMP_Get_highest_ready get_highest_ready,
1597  Scheduler_SMP_Move move_from_ready_to_scheduled,
1598  Scheduler_SMP_Allocate_processor allocate_processor
1599 )
1600 {
1601  ISR_lock_Context lock_context;
1602  Scheduler_SMP_Node_state node_state;
1603 
1604  _Thread_Scheduler_acquire_critical( thread, &lock_context );
1605 
1606  node_state = _Scheduler_SMP_Node_state( node );
1608 
1609  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1610  Per_CPU_Control *thread_cpu;
1611 
1612  thread_cpu = _Thread_Get_CPU( thread );
1613  _Scheduler_Thread_change_state( thread, next_state );
1614  _Thread_Scheduler_release_critical( thread, &lock_context );
1615 
1616  _Scheduler_SMP_Extract_from_scheduled( context, node );
1618  context,
1619  node,
1620  thread_cpu,
1621  extract_from_ready,
1622  get_highest_ready,
1623  move_from_ready_to_scheduled,
1624  allocate_processor
1625  );
1626  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1627  _Thread_Scheduler_release_critical( thread, &lock_context );
1628  ( *extract_from_ready )( context, node );
1629  } else {
1630  _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1631  _Thread_Scheduler_release_critical( thread, &lock_context );
1632  }
1633 }
1634 
1643 static inline void _Scheduler_SMP_Do_start_idle(
1644  Scheduler_Context *context,
1645  Thread_Control *idle,
1646  Per_CPU_Control *cpu,
1647  Scheduler_SMP_Register_idle register_idle
1648 )
1649 {
1650  Scheduler_SMP_Context *self;
1651  Scheduler_SMP_Node *node;
1652 
1653  self = _Scheduler_SMP_Get_self( context );
1654  node = _Scheduler_SMP_Thread_get_node( idle );
1655 
1658 
1659  _Thread_Set_CPU( idle, cpu );
1660  ( *register_idle )( context, &node->Base, cpu );
1661  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
1662  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1663 }
1664 
1674 static inline void _Scheduler_SMP_Add_processor(
1675  Scheduler_Context *context,
1676  Thread_Control *idle,
1677  Scheduler_SMP_Has_ready has_ready,
1678  Scheduler_SMP_Enqueue enqueue_scheduled,
1679  Scheduler_SMP_Register_idle register_idle
1680 )
1681 {
1682  Scheduler_SMP_Context *self;
1683  Scheduler_Node *node;
1684 
1685  self = _Scheduler_SMP_Get_self( context );
1687  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1688  node = _Thread_Scheduler_get_home_node( idle );
1690  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );
1691 
1692  if ( ( *has_ready )( &self->Base ) ) {
1693  Priority_Control insert_priority;
1694 
1695  insert_priority = _Scheduler_SMP_Node_priority( node );
1696  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1697  ( *enqueue_scheduled )( &self->Base, node, insert_priority );
1698  } else {
1699  _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
1700  }
1701 }
1702 
1715  Scheduler_Context *context,
1716  Per_CPU_Control *cpu,
1717  Scheduler_SMP_Extract extract_from_ready,
1718  Scheduler_SMP_Enqueue enqueue
1719 )
1720 {
1721  Scheduler_SMP_Context *self;
1722  Chain_Node *chain_node;
1723  Scheduler_Node *victim_node;
1724  Thread_Control *victim_user;
1725  Thread_Control *victim_owner;
1726  Thread_Control *idle;
1727 
1728  self = _Scheduler_SMP_Get_self( context );
1729  chain_node = _Chain_First( &self->Scheduled );
1730 
1731  do {
1732  _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
1733  victim_node = (Scheduler_Node *) chain_node;
1734  victim_user = _Scheduler_Node_get_user( victim_node );
1735  chain_node = _Chain_Next( chain_node );
1736  } while ( _Thread_Get_CPU( victim_user ) != cpu );
1737 
1738  _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
1739  victim_owner = _Scheduler_Node_get_owner( victim_node );
1740 
1741  if ( !victim_owner->is_idle ) {
1742  Scheduler_Node *idle_node;
1743 
1745  &self->Base,
1746  victim_node,
1748  );
1749  idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
1750  idle_node = _Thread_Scheduler_get_home_node( idle );
1751  ( *extract_from_ready )( &self->Base, idle_node );
1753  &self->Base,
1754  idle_node,
1755  victim_node,
1757  );
1758 
1759  if ( !_Chain_Is_empty( &self->Scheduled ) ) {
1760  Priority_Control insert_priority;
1761 
1762  insert_priority = _Scheduler_SMP_Node_priority( victim_node );
1763  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1764  ( *enqueue )( context, victim_node, insert_priority );
1765  }
1766  } else {
1767  _Assert( victim_owner == victim_user );
1768  _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
1769  idle = victim_owner;
1771  }
1772 
1773  return idle;
1774 }
1775 
1795 static inline void _Scheduler_SMP_Set_affinity(
1796  Scheduler_Context *context,
1797  Thread_Control *thread,
1798  Scheduler_Node *node,
1799  void *arg,
1800  Scheduler_SMP_Set_affinity set_affinity,
1801  Scheduler_SMP_Extract extract_from_ready,
1802  Scheduler_SMP_Get_highest_ready get_highest_ready,
1803  Scheduler_SMP_Move move_from_ready_to_scheduled,
1804  Scheduler_SMP_Enqueue enqueue,
1805  Scheduler_SMP_Allocate_processor allocate_processor
1806 )
1807 {
1808  Scheduler_SMP_Node_state node_state;
1809  Priority_Control insert_priority;
1810 
1811  node_state = _Scheduler_SMP_Node_state( node );
1812  insert_priority = _Scheduler_SMP_Node_priority( node );
1813  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
1814 
1815  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
1816  _Scheduler_SMP_Extract_from_scheduled( context, node );
1818  context,
1819  node,
1820  _Thread_Get_CPU( thread ),
1821  extract_from_ready,
1822  get_highest_ready,
1823  move_from_ready_to_scheduled,
1824  allocate_processor
1825  );
1826  ( *set_affinity )( context, node, arg );
1827  ( *enqueue )( context, node, insert_priority );
1828  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
1829  ( *extract_from_ready )( context, node );
1830  ( *set_affinity )( context, node, arg );
1831  ( *enqueue )( context, node, insert_priority );
1832  } else {
1833  _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
1834  ( *set_affinity )( context, node, arg );
1835  }
1836 }
1837 
1840 #ifdef __cplusplus
1841 }
1842 #endif /* __cplusplus */
1843 
1844 #endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */
static void _Scheduler_SMP_Release_idle_thread(Scheduler_Context *context, Thread_Control *idle)
Releases the thread and adds it to the idle threads.
Scheduler context specialization for SMP schedulers.
Definition: schedulersmp.h:46
static void _Scheduler_SMP_Node_change_state(Scheduler_Node *node, Scheduler_SMP_Node_state new_state)
Changes the state of the node to the given state.
int sticky_level
The sticky level determines if this scheduler node should use an idle thread in case this node is scheduled and the owner thread is blocked.
bool(* Chain_Node_order)(const void *left, const Chain_Node *right)
Chain node order.
Definition: chainimpl.h:844
static __inline__ Chain_Node * _Chain_First(const Chain_Control *the_chain)
Returns a pointer to the chain's first node.
Definition: chainimpl.h:260
SMP Scheduler API.
static void _Scheduler_SMP_Add_processor(Scheduler_Context *context, Thread_Control *idle, Scheduler_SMP_Has_ready has_ready, Scheduler_SMP_Enqueue enqueue_scheduled, Scheduler_SMP_Register_idle register_idle)
Adds the idle thread to the processor.
static void _Scheduler_SMP_Do_nothing_register_idle(Scheduler_Context *context, Scheduler_Node *idle, Per_CPU_Control *cpu)
Does nothing.
Scheduler_Try_to_schedule_action
This enumeration defines what a scheduler should do with a node which could be scheduled.
uint64_t Priority_Control
The thread priority control.
Definition: priority.h:70
Inlined Routines Associated with the Manipulation of the Priority-Based Scheduling Structures...
static Scheduler_SMP_Node * _Scheduler_SMP_Thread_get_own_node(Thread_Control *thread)
Gets the scheduler smp node of the thread.
Scheduler context.
Definition: scheduler.h:247
static __inline__ void _Scheduler_Exchange_idle_thread(Scheduler_Node *needs_idle, Scheduler_Node *uses_idle, Thread_Control *idle)
Exchanges an idle thread from the scheduler node that uses it right now to another scheduler node...
static void _Scheduler_SMP_Extract_from_scheduled(Scheduler_Context *context, Scheduler_Node *node)
Extracts a scheduled node from the scheduled nodes.
static void _Scheduler_SMP_Withdraw_node(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Thread_Scheduler_state next_state, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Withdraws the node.
static Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(const Scheduler_Node *node)
Gets the state of the node.
#define SCHEDULER_PRIORITY_APPEND(priority)
Returns the priority control with the append indicator bit set.
union Scheduler_Node::@18 Node
Chain node for usage in various scheduler data structures.
static __inline__ bool _Thread_Is_executing_on_a_processor(const Thread_Control *the_thread)
Checks if the thread executes currently on some processor in the system.
Definition: threadimpl.h:930
Priority_Control priority
The current priority of thread owning this node.
Definition: schedulersmp.h:114
static void _Scheduler_SMP_Reconsider_help_request(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_ready)
Reconsiders help request.
static Thread_Control * _Scheduler_SMP_Get_idle_thread(Scheduler_Context *context)
Gets The first idle thread of the given context.
static Scheduler_SMP_Node * _Scheduler_SMP_Thread_get_node(Thread_Control *thread)
Gets the scheduler smp node of the thread.
static bool _Scheduler_SMP_Ask_for_help(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Chain_Node_order order, Scheduler_SMP_Insert insert_ready, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_scheduled_to_ready, Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Asks for help.
This scheduler node is ready.
Definition: schedulersmp.h:94
This thread is scheduled with respect to the scheduler.
Definition: thread.h:229
This thread is ready with respect to the scheduler.
Definition: thread.h:236
static __inline__ Chain_Node * _Chain_Get_first_unprotected(Chain_Control *the_chain)
Gets the first node (unprotected).
Definition: chainimpl.h:592
static Scheduler_SMP_Context * _Scheduler_SMP_Get_self(Scheduler_Context *context)
Gets the scheduler smp context.
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:764
const struct Scheduler_Context * context
The scheduler context of the scheduler owning this processor.
Definition: percpu.h:522
The scheduler node is scheduled.
Definition: schedulersmp.h:86
static Thread_Control * _Scheduler_SMP_Preempt(Scheduler_Context *context, Scheduler_Node *scheduled, Scheduler_Node *victim, Scheduler_SMP_Allocate_processor allocate_processor)
Preempts the victim&#39;s thread and allocates a cpu for the scheduled thread.
Scheduler_SMP_Node_state
SMP scheduler node states.
Definition: schedulersmp.h:70
static bool _Scheduler_SMP_Priority_less_equal(const void *to_insert, const Chain_Node *next)
Checks if to_insert is less or equal than the priority of the chain node.
static __inline__ Priority_Control _Scheduler_Node_get_priority(Scheduler_Node *node)
Gets the priority of the node.
static Thread_Control * _Scheduler_SMP_Remove_processor(Scheduler_Context *context, Per_CPU_Control *cpu, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Enqueue enqueue)
Removes an idle thread from the processor.
Chain_Control Threads_in_need_for_help
Chain of threads in need for help.
Definition: percpu.h:497
#define _ISR_Get_level()
Return current interrupt level.
Definition: isrlevel.h:128
#define SCHEDULER_PRIORITY_PURIFY(priority)
Clears the priority append indicator bit.
static void _Scheduler_SMP_Block(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_scheduled, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Blocks the thread.
struct _Thread_Control * idle
The idle thread claimed by this node in case the sticky level is greater than zero and the thread is ...
Scheduler_SMP_Node_state state
The state of this node.
Definition: schedulersmp.h:109
size_t helping_nodes
Count of nodes scheduler nodes minus one.
Definition: thread.h:309
Information for the Assert Handler.
static void _Scheduler_SMP_Set_affinity(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, void *arg, Scheduler_SMP_Set_affinity set_affinity, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Enqueue enqueue, Scheduler_SMP_Allocate_processor allocate_processor)
Sets the affinity of the node.
static __inline__ void _Scheduler_Thread_change_state(Thread_Control *the_thread, Thread_Scheduler_state new_state)
Changes the threads state to the given new state.
static __inline__ void _Chain_Append_unprotected(Chain_Control *the_chain, Chain_Node *the_node)
Appends a node (unprotected).
Definition: chainimpl.h:680
static void _Scheduler_SMP_Enqueue_to_scheduled(Scheduler_Context *context, Scheduler_Node *node, Priority_Control priority, Scheduler_Node *lowest_scheduled, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_scheduled_to_ready, Scheduler_SMP_Allocate_processor allocate_processor)
Tries to schedule the given node.
static __inline__ void _Chain_Prepend_unprotected(Chain_Control *the_chain, Chain_Node *the_node)
Prepends a node (unprotected).
Definition: chainimpl.h:732
static __inline__ Per_CPU_Control * _Scheduler_Block_node(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, bool is_scheduled, Scheduler_Get_idle_thread get_idle_thread)
Blocks this scheduler node.
static void _Scheduler_SMP_Node_initialize(const Scheduler_Control *scheduler, Scheduler_SMP_Node *node, Thread_Control *thread, Priority_Control priority)
Initializes the scheduler smp node.
This scheduler node is blocked.
Definition: schedulersmp.h:76
static __inline__ void _Thread_Scheduler_cancel_need_for_help(Thread_Control *the_thread, Per_CPU_Control *cpu)
Cancels the thread&#39;s need for help.
Definition: threadimpl.h:1394
static void _Scheduler_SMP_Schedule_highest_ready(Scheduler_Context *context, Scheduler_Node *victim, Per_CPU_Control *victim_cpu, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Schedules the highest ready node.
Thread_Scheduler_state
The thread state with respect to the scheduler.
Definition: thread.h:214
Per CPU Core Structure.
Definition: percpu.h:347
static void _Scheduler_SMP_Allocate_processor_lazy(Scheduler_Context *context, Scheduler_Node *scheduled, Scheduler_Node *victim, Per_CPU_Control *victim_cpu)
Allocates the cpu for the scheduled thread.
static bool _Scheduler_SMP_Is_processor_owned_by_us(const Scheduler_Context *context, const Per_CPU_Control *cpu)
Checks if the processor is owned by the given context.
static __inline__ void _Scheduler_Ask_for_help(Thread_Control *the_thread)
Registers an ask for help request if necessary.
static __inline__ Scheduler_Node * _Thread_Scheduler_get_home_node(const Thread_Control *the_thread)
Gets the scheduler&#39;s home node.
Definition: threadimpl.h:1438
static __inline__ Chain_Node * _Chain_Next(const Chain_Node *the_node)
Returns pointer to the next node from this node.
Definition: chainimpl.h:327
Chain_Node Node
Definition: objectdata.h:41
static __inline__ void _Thread_Set_CPU(Thread_Control *thread, Per_CPU_Control *cpu)
Sets the cpu of the thread&#39;s scheduler.
Definition: threadimpl.h:886
static Priority_Control _Scheduler_SMP_Node_priority(const Scheduler_Node *node)
Gets the priority of the node.
static void _Scheduler_SMP_Preempt_and_schedule_highest_ready(Scheduler_Context *context, Scheduler_Node *victim, Per_CPU_Control *victim_cpu, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Schedules the highest ready node and preempts a currently executing one.
Objects_Control Object
Definition: thread.h:727
static __inline__ Thread_Control * _Scheduler_Node_get_owner(const Scheduler_Node *node)
Gets the owner of the node.
static __inline__ void _Scheduler_Node_do_initialize(const struct _Scheduler_Control *scheduler, Scheduler_Node *node, Thread_Control *the_thread, Priority_Control priority)
Initializes a node.
static bool _Scheduler_SMP_Enqueue_scheduled(Scheduler_Context *context, Scheduler_Node *const node, Priority_Control insert_priority, Chain_Node_order order, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Insert insert_ready, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Enqueues a scheduled node according to the specified order function.
static __inline__ void _Chain_Extract_unprotected(Chain_Node *the_node)
Extracts this node (unprotected).
Definition: chainimpl.h:558
static __inline__ Thread_Control * _Scheduler_Node_get_idle(const Scheduler_Node *node)
Gets the idle thread of the node.
Scheduler node specialization for SMP schedulers.
Definition: schedulersmp.h:100
static __inline__ void _Scheduler_Discard_idle_thread(Scheduler_Context *context, Thread_Control *the_thread, Scheduler_Node *node, Scheduler_Release_idle_thread release_idle_thread)
Discard the idle thread from the scheduler node.
Chain_Node Help_node
Node for the Per_CPU_Control::Threads_in_need_for_help chain.
Definition: thread.h:302
static void _Scheduler_SMP_Do_start_idle(Scheduler_Context *context, Thread_Control *idle, Per_CPU_Control *cpu, Scheduler_SMP_Register_idle register_idle)
Starts the idle thread on the given processor.
static __inline__ void _Thread_Scheduler_acquire_critical(Thread_Control *the_thread, ISR_lock_Context *lock_context)
Acquires the lock context in a critical section.
Definition: threadimpl.h:1483
static __inline__ void _Thread_Scheduler_release_critical(Thread_Control *the_thread, ISR_lock_Context *lock_context)
Releases the lock context in a critical section.
Definition: threadimpl.h:1497
static void _Scheduler_SMP_Yield(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Enqueue enqueue, Scheduler_SMP_Enqueue enqueue_scheduled)
Performs a yield and asks for help if necessary.
Chain Handler API.
Thread_Scheduler_state state
The current scheduler state of this thread.
Definition: thread.h:253
static void _Scheduler_SMP_Exctract_idle_thread(Thread_Control *idle)
Extracts the node of the idle thread.
static __inline__ Thread_Control * _Scheduler_Release_idle_thread(Scheduler_Context *context, Scheduler_Node *node, Scheduler_Release_idle_thread release_idle_thread)
Releases an idle thread using this scheduler node.
const struct _Scheduler_Control * pinned_scheduler
The pinned scheduler of this thread.
Definition: thread.h:263
static __inline__ bool _Scheduler_Unblock_node(Scheduler_Context *context, Thread_Control *the_thread, Scheduler_Node *node, bool is_scheduled, Scheduler_Release_idle_thread release_idle_thread)
Unblocks this scheduler node.
static __inline__ Scheduler_Try_to_schedule_action _Scheduler_Try_to_schedule_node(Scheduler_Context *context, Scheduler_Node *node, const Thread_Control *idle, Scheduler_Get_idle_thread get_idle_thread)
Tries to schedule the scheduler node.
Interface to Kernel Print Methods.
struct _Thread_Control * heir
This is the heir thread for this processor.
Definition: percpu.h:436
Scheduler_Node Base
Basic scheduler node.
Definition: schedulersmp.h:104
static bool _Scheduler_SMP_Enqueue(Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority, Chain_Node_order order, Scheduler_SMP_Insert insert_ready, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_scheduled_to_ready, Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Enqueues a node according to the specified order function.
static __inline__ Per_CPU_Control * _Thread_Get_CPU(const Thread_Control *thread)
Gets the cpu of the thread&#39;s scheduler.
Definition: threadimpl.h:867
static void _Scheduler_SMP_Node_update_priority(Scheduler_SMP_Node *node, Priority_Control new_priority)
Updates the priority of the node to the new priority.
static Scheduler_SMP_Node * _Scheduler_SMP_Node_downcast(Scheduler_Node *node)
Gets the scheduler smp node.
static __inline__ void _Chain_Initialize_empty(Chain_Control *the_chain)
Initializes this chain as empty.
Definition: chainimpl.h:505
static void _Scheduler_SMP_Unblock(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Update update, Scheduler_SMP_Enqueue enqueue)
Unblocks the thread.
static Scheduler_Node * _Scheduler_SMP_Get_lowest_scheduled(Scheduler_Context *context, Scheduler_Node *filter)
Returns the lowest member of the scheduled nodes.
Scheduler control.
Definition: scheduler.h:264
Scheduler node for per-thread data.
Definition: schedulernode.h:79
static void _Scheduler_SMP_Insert_scheduled(Scheduler_Context *context, Scheduler_Node *node_to_insert, Priority_Control priority_to_insert)
Inserts the node with the given priority into the scheduled nodes.
static void _Scheduler_SMP_Allocate_processor_exact(Scheduler_Context *context, Scheduler_Node *scheduled, Scheduler_Node *victim, Per_CPU_Control *victim_cpu)
Allocates the cpu for the scheduled thread.
static __inline__ Chain_Node * _Chain_Last(const Chain_Control *the_chain)
Returns pointer to chain&#39;s last node.
Definition: chainimpl.h:294
static __inline__ void _Thread_Dispatch_update_heir(Per_CPU_Control *cpu_self, Per_CPU_Control *cpu_for_heir, Thread_Control *heir)
Updates the used cpu time for the heir and dispatches a new heir.
Definition: threadimpl.h:1177
static void _Scheduler_SMP_Allocate_processor(Scheduler_Context *context, Scheduler_Node *scheduled, Scheduler_Node *victim, Per_CPU_Control *victim_cpu, Scheduler_SMP_Allocate_processor allocate_processor)
Allocates the cpu for the scheduled thread using the given allocation function.
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:65
static __inline__ bool _Thread_Is_ready(const Thread_Control *the_thread)
Checks if the thread is ready.
Definition: threadimpl.h:401
static __inline__ void _Chain_Insert_ordered_unprotected(Chain_Control *the_chain, Chain_Node *to_insert, const void *left, Chain_Node_order order)
Inserts a node into the chain according to the order relation.
Definition: chainimpl.h:864
Chain_Node Chain
The node for Thread_Control::Scheduler::Scheduler_nodes.
Definition: schedulernode.h:91
static __inline__ bool _Chain_Is_empty(const Chain_Control *the_chain)
Checks if the chain is empty.
Definition: chainimpl.h:393
static __inline__ Thread_Control * _Scheduler_Node_get_user(const Scheduler_Node *node)
Gets the user of the node.
static __inline__ Chain_Node * _Chain_Tail(Chain_Control *the_chain)
Returns pointer to chain tail.
Definition: chainimpl.h:227
static __inline__ const Chain_Node * _Chain_Immutable_tail(const Chain_Control *the_chain)
Returns pointer to immutable chain tail.
Definition: chainimpl.h:243
static void _Scheduler_SMP_Update_priority(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Update update, Scheduler_SMP_Enqueue enqueue, Scheduler_SMP_Enqueue enqueue_scheduled, Scheduler_SMP_Ask_for_help ask_for_help)
Updates the priority of the node and the position in the queues it is in.
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG.
Definition: assert.h:100
static void _Scheduler_SMP_Initialize(Scheduler_SMP_Context *self)
Initializes the scheduler smp context.
bool is_idle
Definition: thread.h:789