23 #ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H 24 #define _RTEMS_SCORE_SCHEDULERSMPIMPL_H 278 typedef bool ( *Scheduler_SMP_Has_ready )(
292 typedef void ( *Scheduler_SMP_Extract )(
297 typedef void ( *Scheduler_SMP_Insert )(
303 typedef void ( *Scheduler_SMP_Move )(
308 typedef bool ( *Scheduler_SMP_Ask_for_help )(
314 typedef void ( *Scheduler_SMP_Update )(
320 typedef void ( *Scheduler_SMP_Set_affinity )(
326 typedef bool ( *Scheduler_SMP_Enqueue )(
332 typedef void ( *Scheduler_SMP_Allocate_processor )(
339 typedef void ( *Scheduler_SMP_Register_idle )(
352 static inline void _Scheduler_SMP_Do_nothing_register_idle(
372 static inline bool _Scheduler_SMP_Priority_less_equal(
373 const void *to_insert,
383 return *priority_to_insert <= node_next->
priority;
405 static inline void _Scheduler_SMP_Initialize(
491 static inline void _Scheduler_SMP_Node_initialize(
509 static inline void _Scheduler_SMP_Node_update_priority(
523 static inline void _Scheduler_SMP_Node_change_state(
530 the_node = _Scheduler_SMP_Node_downcast( node );
531 the_node->
state = new_state;
543 static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
548 return cpu->Scheduler.context ==
context;
577 static inline void _Scheduler_SMP_Release_idle_thread(
592 static inline void _Scheduler_SMP_Exctract_idle_thread(
609 static inline void _Scheduler_SMP_Allocate_processor_lazy(
616 Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
617 Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
624 if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
625 if ( _Scheduler_SMP_Is_processor_owned_by_us(
context, scheduled_cpu ) ) {
626 heir = scheduled_cpu->
heir;
627 _Thread_Dispatch_update_heir(
634 heir = scheduled_thread;
637 heir = scheduled_thread;
640 if ( heir != victim_thread ) {
642 _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
659 static inline void _Scheduler_SMP_Allocate_processor_exact(
666 Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
673 _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
685 static inline void _Scheduler_SMP_Allocate_processor(
690 Scheduler_SMP_Allocate_processor allocate_processor
694 ( *allocate_processor )(
context, scheduled, victim, victim_cpu );
711 Scheduler_SMP_Allocate_processor allocate_processor
718 victim_thread = _Scheduler_Node_get_user( victim );
721 _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );
725 if ( victim_thread->
Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
726 _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );
728 if ( victim_thread->
Scheduler.helping_nodes > 0 ) {
731 _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
733 &victim_cpu->Threads_in_need_for_help,
736 _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
740 _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );
742 _Scheduler_SMP_Allocate_processor(
750 return victim_thread;
761 static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
778 return lowest_scheduled;
797 static inline void _Scheduler_SMP_Enqueue_to_scheduled(
802 Scheduler_SMP_Insert insert_scheduled,
803 Scheduler_SMP_Move move_from_scheduled_to_ready,
804 Scheduler_SMP_Allocate_processor allocate_processor
807 Scheduler_Try_to_schedule_action action;
809 action = _Scheduler_Try_to_schedule_node(
812 _Scheduler_Node_get_idle( lowest_scheduled ),
813 _Scheduler_SMP_Get_idle_thread
816 if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
817 _Scheduler_SMP_Preempt(
824 ( *insert_scheduled )(
context, node, priority );
825 ( *move_from_scheduled_to_ready )(
context, lowest_scheduled );
827 _Scheduler_Release_idle_thread(
830 _Scheduler_SMP_Release_idle_thread
832 }
else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
833 _Scheduler_SMP_Node_change_state(
839 ( *insert_scheduled )(
context, node, priority );
840 ( *move_from_scheduled_to_ready )(
context, lowest_scheduled );
842 _Scheduler_Exchange_idle_thread(
845 _Scheduler_Node_get_idle( lowest_scheduled )
848 _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
875 static inline bool _Scheduler_SMP_Enqueue(
880 Scheduler_SMP_Insert insert_ready,
881 Scheduler_SMP_Insert insert_scheduled,
882 Scheduler_SMP_Move move_from_scheduled_to_ready,
883 Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
884 Scheduler_SMP_Allocate_processor allocate_processor
890 lowest_scheduled = ( *get_lowest_scheduled )(
context, node );
892 if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
893 _Scheduler_SMP_Enqueue_to_scheduled(
899 move_from_scheduled_to_ready,
904 ( *insert_ready )(
context, node, insert_priority );
930 static inline bool _Scheduler_SMP_Enqueue_scheduled(
935 Scheduler_SMP_Extract extract_from_ready,
936 Scheduler_SMP_Get_highest_ready get_highest_ready,
937 Scheduler_SMP_Insert insert_ready,
938 Scheduler_SMP_Insert insert_scheduled,
939 Scheduler_SMP_Move move_from_ready_to_scheduled,
940 Scheduler_SMP_Allocate_processor allocate_processor
945 Scheduler_Try_to_schedule_action action;
947 highest_ready = ( *get_highest_ready )(
context, node );
954 node->sticky_level > 0
955 && ( *order )( &insert_priority, &highest_ready->Node.Chain )
957 ( *insert_scheduled )(
context, node, insert_priority );
959 if ( _Scheduler_Node_get_idle( node ) !=
NULL ) {
964 _Thread_Scheduler_acquire_critical( owner, &lock_context );
966 if ( owner->
Scheduler.state == THREAD_SCHEDULER_READY ) {
967 _Thread_Scheduler_cancel_need_for_help(
971 _Scheduler_Discard_idle_thread(
975 _Scheduler_SMP_Release_idle_thread
977 _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
980 _Thread_Scheduler_release_critical( owner, &lock_context );
986 action = _Scheduler_Try_to_schedule_node(
989 _Scheduler_Node_get_idle( node ),
990 _Scheduler_SMP_Get_idle_thread
993 if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
996 _Scheduler_SMP_Preempt(
1003 ( *insert_ready )(
context, node, insert_priority );
1004 ( *move_from_ready_to_scheduled )(
context, highest_ready );
1006 idle = _Scheduler_Release_idle_thread(
1009 _Scheduler_SMP_Release_idle_thread
1011 return ( idle ==
NULL );
1012 }
else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
1014 _Scheduler_SMP_Node_change_state(
1019 ( *insert_ready )(
context, node, insert_priority );
1020 ( *move_from_ready_to_scheduled )(
context, highest_ready );
1022 _Scheduler_Exchange_idle_thread(
1025 _Scheduler_Node_get_idle( node )
1029 _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1031 _Scheduler_SMP_Node_change_state(
1036 ( *extract_from_ready )(
context, highest_ready );
1047 static inline void _Scheduler_SMP_Extract_from_scheduled(
1070 static inline void _Scheduler_SMP_Schedule_highest_ready(
1074 Scheduler_SMP_Extract extract_from_ready,
1075 Scheduler_SMP_Get_highest_ready get_highest_ready,
1076 Scheduler_SMP_Move move_from_ready_to_scheduled,
1077 Scheduler_SMP_Allocate_processor allocate_processor
1080 Scheduler_Try_to_schedule_action action;
1085 action = _Scheduler_Try_to_schedule_node(
1089 _Scheduler_SMP_Get_idle_thread
1092 if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
1093 _Scheduler_SMP_Allocate_processor(
1101 ( *move_from_ready_to_scheduled )(
context, highest_ready );
1103 _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1105 _Scheduler_SMP_Node_change_state(
1110 ( *extract_from_ready )(
context, highest_ready );
1112 }
while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1129 static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
1133 Scheduler_SMP_Extract extract_from_ready,
1134 Scheduler_SMP_Get_highest_ready get_highest_ready,
1135 Scheduler_SMP_Move move_from_ready_to_scheduled,
1136 Scheduler_SMP_Allocate_processor allocate_processor
1139 Scheduler_Try_to_schedule_action action;
1144 action = _Scheduler_Try_to_schedule_node(
1148 _Scheduler_SMP_Get_idle_thread
1151 if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
1152 _Scheduler_SMP_Preempt(
1159 ( *move_from_ready_to_scheduled )(
context, highest_ready );
1161 _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1163 _Scheduler_SMP_Node_change_state(
1168 ( *extract_from_ready )(
context, highest_ready );
1170 }
while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
1189 static inline void _Scheduler_SMP_Block(
1193 Scheduler_SMP_Extract extract_from_scheduled,
1194 Scheduler_SMP_Extract extract_from_ready,
1195 Scheduler_SMP_Get_highest_ready get_highest_ready,
1196 Scheduler_SMP_Move move_from_ready_to_scheduled,
1197 Scheduler_SMP_Allocate_processor allocate_processor
1203 node_state = _Scheduler_SMP_Node_state( node );
1205 thread_cpu = _Scheduler_Block_node(
1210 _Scheduler_SMP_Get_idle_thread
1213 if ( thread_cpu !=
NULL ) {
1217 ( *extract_from_scheduled )(
context, node );
1218 _Scheduler_SMP_Schedule_highest_ready(
1224 move_from_ready_to_scheduled,
1228 ( *extract_from_ready )(
context, node );
1243 static inline void _Scheduler_SMP_Unblock(
1247 Scheduler_SMP_Update update,
1248 Scheduler_SMP_Enqueue enqueue
1254 node_state = _Scheduler_SMP_Node_state( node );
1255 unblock = _Scheduler_Unblock_node(
1260 _Scheduler_SMP_Release_idle_thread
1270 if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
1271 ( *update )(
context, node, priority );
1279 needs_help = ( *enqueue )(
context, node, insert_priority );
1282 _Assert( node->sticky_level > 0 );
1288 _Scheduler_Ask_for_help( thread );
1312 static inline void _Scheduler_SMP_Update_priority(
1316 Scheduler_SMP_Extract extract_from_ready,
1317 Scheduler_SMP_Update update,
1318 Scheduler_SMP_Enqueue enqueue,
1319 Scheduler_SMP_Enqueue enqueue_scheduled,
1320 Scheduler_SMP_Ask_for_help ask_for_help
1330 if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
1332 ( *ask_for_help )(
context, thread, node );
1338 node_state = _Scheduler_SMP_Node_state( node );
1341 _Scheduler_SMP_Extract_from_scheduled(
context, node );
1342 ( *update )(
context, node, priority );
1343 ( *enqueue_scheduled )(
context, node, insert_priority );
1345 ( *extract_from_ready )(
context, node );
1346 ( *update )(
context, node, priority );
1347 ( *enqueue )(
context, node, insert_priority );
1349 ( *update )(
context, node, priority );
1352 ( *ask_for_help )(
context, thread, node );
1368 static inline void _Scheduler_SMP_Yield(
1372 Scheduler_SMP_Extract extract_from_ready,
1373 Scheduler_SMP_Enqueue enqueue,
1374 Scheduler_SMP_Enqueue enqueue_scheduled
1381 node_state = _Scheduler_SMP_Node_state( node );
1382 insert_priority = _Scheduler_SMP_Node_priority( node );
1386 _Scheduler_SMP_Extract_from_scheduled(
context, node );
1387 ( *enqueue_scheduled )(
context, node, insert_priority );
1390 ( *extract_from_ready )(
context, node );
1392 needs_help = ( *enqueue )(
context, node, insert_priority );
1398 _Scheduler_Ask_for_help( thread );
1409 static inline void _Scheduler_SMP_Insert_scheduled(
1417 self = _Scheduler_SMP_Get_self(
context );
1421 &node_to_insert->Node.Chain,
1422 &priority_to_insert,
1423 _Scheduler_SMP_Priority_less_equal
1449 static inline bool _Scheduler_SMP_Ask_for_help(
1454 Scheduler_SMP_Insert insert_ready,
1455 Scheduler_SMP_Insert insert_scheduled,
1456 Scheduler_SMP_Move move_from_scheduled_to_ready,
1457 Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
1458 Scheduler_SMP_Allocate_processor allocate_processor
1473 lowest_scheduled = ( *get_lowest_scheduled )(
context, node );
1475 _Thread_Scheduler_acquire_critical( thread, &lock_context );
1477 if ( thread->
Scheduler.state == THREAD_SCHEDULER_READY ) {
1480 node_state = _Scheduler_SMP_Node_state( node );
1485 insert_priority = _Scheduler_SMP_Node_priority( node );
1487 if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
1488 _Thread_Scheduler_cancel_need_for_help(
1492 _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1493 _Thread_Scheduler_release_critical( thread, &lock_context );
1495 _Scheduler_SMP_Preempt(
1502 ( *insert_scheduled )(
context, node, insert_priority );
1503 ( *move_from_scheduled_to_ready )(
context, lowest_scheduled );
1505 _Scheduler_Release_idle_thread(
1508 _Scheduler_SMP_Release_idle_thread
1512 _Thread_Scheduler_release_critical( thread, &lock_context );
1514 ( *insert_ready )(
context, node, insert_priority );
1518 _Thread_Scheduler_cancel_need_for_help(
1522 _Scheduler_Discard_idle_thread(
1526 _Scheduler_SMP_Release_idle_thread
1528 _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
1529 _Thread_Scheduler_release_critical( thread, &lock_context );
1532 _Thread_Scheduler_release_critical( thread, &lock_context );
1536 _Thread_Scheduler_release_critical( thread, &lock_context );
1552 static inline void _Scheduler_SMP_Reconsider_help_request(
1556 Scheduler_SMP_Extract extract_from_ready
1561 _Thread_Scheduler_acquire_critical( thread, &lock_context );
1564 thread->
Scheduler.state == THREAD_SCHEDULER_SCHEDULED
1566 && node->sticky_level == 1
1569 ( *extract_from_ready )(
context, node );
1572 _Thread_Scheduler_release_critical( thread, &lock_context );
1590 static inline void _Scheduler_SMP_Withdraw_node(
1594 Thread_Scheduler_state next_state,
1595 Scheduler_SMP_Extract extract_from_ready,
1596 Scheduler_SMP_Get_highest_ready get_highest_ready,
1597 Scheduler_SMP_Move move_from_ready_to_scheduled,
1598 Scheduler_SMP_Allocate_processor allocate_processor
1604 _Thread_Scheduler_acquire_critical( thread, &lock_context );
1606 node_state = _Scheduler_SMP_Node_state( node );
1613 _Scheduler_Thread_change_state( thread, next_state );
1614 _Thread_Scheduler_release_critical( thread, &lock_context );
1616 _Scheduler_SMP_Extract_from_scheduled(
context, node );
1617 _Scheduler_SMP_Schedule_highest_ready(
1623 move_from_ready_to_scheduled,
1627 _Thread_Scheduler_release_critical( thread, &lock_context );
1628 ( *extract_from_ready )(
context, node );
1631 _Thread_Scheduler_release_critical( thread, &lock_context );
1643 static inline void _Scheduler_SMP_Do_start_idle(
1647 Scheduler_SMP_Register_idle register_idle
1653 self = _Scheduler_SMP_Get_self(
context );
1654 node = _Scheduler_SMP_Thread_get_node( idle );
1656 _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
1662 _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1674 static inline void _Scheduler_SMP_Add_processor(
1677 Scheduler_SMP_Has_ready has_ready,
1678 Scheduler_SMP_Enqueue enqueue_scheduled,
1679 Scheduler_SMP_Register_idle register_idle
1685 self = _Scheduler_SMP_Get_self(
context );
1686 idle->
Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1687 _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
1692 if ( ( *has_ready )( &
self->Base ) ) {
1695 insert_priority = _Scheduler_SMP_Node_priority( node );
1697 ( *enqueue_scheduled )( &
self->Base, node, insert_priority );
1717 Scheduler_SMP_Extract extract_from_ready,
1718 Scheduler_SMP_Enqueue enqueue
1728 self = _Scheduler_SMP_Get_self(
context );
1734 victim_user = _Scheduler_Node_get_user( victim_node );
1738 _Scheduler_SMP_Extract_from_scheduled(
context, victim_node );
1741 if ( !victim_owner->
is_idle ) {
1744 _Scheduler_Release_idle_thread(
1747 _Scheduler_SMP_Release_idle_thread
1749 idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
1751 ( *extract_from_ready )( &
self->Base, idle_node );
1752 _Scheduler_SMP_Preempt(
1756 _Scheduler_SMP_Allocate_processor_exact
1762 insert_priority = _Scheduler_SMP_Node_priority( victim_node );
1764 ( *enqueue )(
context, victim_node, insert_priority );
1767 _Assert( victim_owner == victim_user );
1768 _Assert( _Scheduler_Node_get_idle( victim_node ) ==
NULL );
1769 idle = victim_owner;
1770 _Scheduler_SMP_Exctract_idle_thread( idle );
1795 static inline void _Scheduler_SMP_Set_affinity(
1800 Scheduler_SMP_Set_affinity set_affinity,
1801 Scheduler_SMP_Extract extract_from_ready,
1802 Scheduler_SMP_Get_highest_ready get_highest_ready,
1803 Scheduler_SMP_Move move_from_ready_to_scheduled,
1804 Scheduler_SMP_Enqueue enqueue,
1805 Scheduler_SMP_Allocate_processor allocate_processor
1811 node_state = _Scheduler_SMP_Node_state( node );
1812 insert_priority = _Scheduler_SMP_Node_priority( node );
1816 _Scheduler_SMP_Extract_from_scheduled(
context, node );
1817 _Scheduler_SMP_Preempt_and_schedule_highest_ready(
1823 move_from_ready_to_scheduled,
1826 ( *set_affinity )(
context, node, arg );
1827 ( *enqueue )(
context, node, insert_priority );
1829 ( *extract_from_ready )(
context, node );
1830 ( *set_affinity )(
context, node, arg );
1831 ( *enqueue )(
context, node, insert_priority );
1834 ( *set_affinity )(
context, node, arg );
RTEMS_INLINE_ROUTINE Thread_Control * _Scheduler_Node_get_owner(const Scheduler_Node *node)
Gets the owner of the node.
Definition: schedulernodeimpl.h:135
Scheduler context specialization for SMP schedulers.
Definition: schedulersmp.h:46
RTEMS_INLINE_ROUTINE void _Chain_Extract_unprotected(Chain_Node *the_node)
Extracts this node (unprotected).
Definition: chainimpl.h:558
bool(* Chain_Node_order)(const void *left, const Chain_Node *right)
Chain node order.
Definition: chainimpl.h:844
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_Tail(Chain_Control *the_chain)
Returns pointer to chain tail.
Definition: chainimpl.h:227
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_Next(const Chain_Node *the_node)
Returns pointer to the next node from this node.
Definition: chainimpl.h:327
uint64_t Priority_Control
The thread priority control.
Definition: priority.h:70
Inlined Routines Associated with the Manipulation of the Priority-Based Scheduling Structures.
Scheduler context.
Definition: scheduler.h:252
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(Thread_Control *thread, Per_CPU_Control *cpu)
Sets the cpu of the thread's scheduler.
Definition: threadimpl.h:860
#define SCHEDULER_PRIORITY_APPEND(priority)
Returns the priority control with the append indicator bit set.
Definition: schedulernodeimpl.h:72
Priority_Control priority
The current priority of thread owning this node.
Definition: schedulersmp.h:114
RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(Chain_Control *the_chain)
Initializes this chain as empty.
Definition: chainimpl.h:505
This scheduler node is ready.
Definition: schedulersmp.h:94
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:771
The scheduler node is scheduled.
Definition: schedulersmp.h:86
Scheduler_SMP_Node_state
SMP scheduler node states.
Definition: schedulersmp.h:70
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(const struct _Scheduler_Control *scheduler, Scheduler_Node *node, Thread_Control *the_thread, Priority_Control priority)
Initializes a node.
Definition: schedulernodeimpl.h:91
#define _ISR_Get_level()
Return current interrupt level.
Definition: isrlevel.h:128
#define SCHEDULER_PRIORITY_PURIFY(priority)
Clears the priority append indicator bit.
Definition: schedulernodeimpl.h:66
Scheduler_SMP_Node_state state
The state of this node.
Definition: schedulersmp.h:109
Information for the Assert Handler.
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Node_get_priority(Scheduler_Node *node)
Gets the priority of the node.
Definition: schedulernodeimpl.h:149
RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(const Chain_Control *the_chain)
Checks if the chain is empty.
Definition: chainimpl.h:393
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_First(const Chain_Control *the_chain)
Returns pointer to chain's first node.
Definition: chainimpl.h:260
This scheduler node is blocked.
Definition: schedulersmp.h:76
Per CPU Core Structure.
Definition: percpu.h:347
Chain_Node Node
Definition: objectdata.h:41
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_Get_first_unprotected(Chain_Control *the_chain)
Gets the first node (unprotected).
Definition: chainimpl.h:592
Objects_Control Object
Definition: thread.h:734
RTEMS_INLINE_ROUTINE const Chain_Node * _Chain_Immutable_tail(const Chain_Control *the_chain)
Returns pointer to immutable chain tail.
Definition: chainimpl.h:243
Scheduler node specialization for SMP schedulers.
Definition: schedulersmp.h:100
RTEMS_INLINE_ROUTINE Per_CPU_Control * _Thread_Get_CPU(const Thread_Control *thread)
Gets the cpu of the thread's scheduler.
Definition: threadimpl.h:841
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_Last(const Chain_Control *the_chain)
Returns pointer to chain's last node.
Definition: chainimpl.h:294
RTEMS_INLINE_ROUTINE void _Chain_Insert_ordered_unprotected(Chain_Control *the_chain, Chain_Node *to_insert, const void *left, Chain_Node_order order)
Inserts a node into the chain according to the order relation.
Definition: chainimpl.h:864
Interface to Kernel Print Methods.
struct _Thread_Control * heir
This is the heir thread for this processor.
Definition: percpu.h:436
Scheduler_Node Base
Basic scheduler node.
Definition: schedulersmp.h:104
RTEMS_INLINE_ROUTINE void _Chain_Append_unprotected(Chain_Control *the_chain, Chain_Node *the_node)
Appends a node (unprotected).
Definition: chainimpl.h:680
unsigned context
Definition: tlb.h:108
Scheduler control.
Definition: scheduler.h:269
Scheduler node for per-thread data.
Definition: schedulernode.h:79
RTEMS_INLINE_ROUTINE bool _Thread_Is_ready(const Thread_Control *the_thread)
Checks if the thread is ready.
Definition: threadimpl.h:375
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:65
RTEMS_INLINE_ROUTINE void _Chain_Prepend_unprotected(Chain_Control *the_chain, Chain_Node *the_node)
Prepends a node (unprotected).
Definition: chainimpl.h:732
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG.
Definition: assert.h:100
RTEMS_INLINE_ROUTINE Scheduler_Node * _Thread_Scheduler_get_home_node(const Thread_Control *the_thread)
Gets the scheduler's home node.
Definition: threadimpl.h:1412
#define NULL
The null pointer constant. (Note: the original brief here, "Requests a GPIO pin group configuration.", was a mis-joined cross-reference belonging to a different bestcomm symbol.)
Definition: bestcomm_api.h:77
bool is_idle
Definition: thread.h:796