RTEMS 7.0-rc1
schedulersmpimpl.h
/* SPDX-License-Identifier: BSD-2-Clause */

/*
 * Copyright (C) 2013, 2021 embedded brains GmbH & Co. KG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H

#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Enqueue_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
);

typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);

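/*
 * Illustrative sketch (added; not part of the original header): a concrete
 * SMP scheduler wires its instance-specific operations into the generic
 * helpers below.  The _My_SMP_* names are hypothetical; only the
 * _Scheduler_SMP_* identifiers are provided by this header.
 *
 * @code
 * static bool _My_SMP_Enqueue(
 *   Scheduler_Context *context,
 *   Scheduler_Node    *node,
 *   Priority_Control   insert_priority
 * )
 * {
 *   // Delegate to the generic enqueue, supplying this scheduler's
 *   // ready-set operations and the helpers defined in this header.
 *   return _Scheduler_SMP_Enqueue(
 *     context,
 *     node,
 *     insert_priority,
 *     _Scheduler_SMP_Priority_less_equal,
 *     _My_SMP_Insert_ready,
 *     _Scheduler_SMP_Insert_scheduled,
 *     _My_SMP_Move_from_scheduled_to_ready,
 *     _My_SMP_Move_from_ready_to_scheduled,
 *     _Scheduler_SMP_Get_lowest_scheduled,
 *     _Scheduler_SMP_Allocate_processor_lazy,
 *     _My_SMP_Get_idle,
 *     _My_SMP_Release_idle
 *   );
 * }
 * @endcode
 */
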
static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}

static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *key,
  const Chain_Node *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  (void) to_insert;
  priority_to_insert = (const Priority_Control *) key;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}

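/*
 * Note (added for clarity): _Chain_Insert_ordered_unprotected() inserts a
 * node before the first existing node for which this relation holds.  A
 * purified insert priority therefore lands in front of all nodes with an
 * equal priority value, while SCHEDULER_PRIORITY_APPEND() sets the append
 * indicator bit so that the value compares strictly greater and the node
 * lands behind its equal-priority peers (FIFO within a priority).
 */
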
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}

static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}

static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

void _Scheduler_SMP_Remove_ask_for_help_from_processor(
  Thread_Control  *thread,
  Per_CPU_Control *cpu
);

static inline void _Scheduler_SMP_Cancel_ask_for_help( Thread_Control *thread )
{
  Per_CPU_Control *cpu;

  _Assert( _ISR_lock_Is_owner( &thread->Scheduler.Lock ) );
  cpu = thread->Scheduler.ask_for_help_cpu;

  if ( RTEMS_PREDICT_FALSE( cpu != NULL ) ) {
    _Scheduler_SMP_Remove_ask_for_help_from_processor( thread, cpu );
  }
}

static inline void _Scheduler_SMP_Request_ask_for_help( Thread_Control *thread )
{
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu_self;

  cpu_self = _Per_CPU_Get();

  _Assert( thread->Scheduler.ask_for_help_cpu == NULL );
  thread->Scheduler.ask_for_help_cpu = cpu_self;
  cpu_self->dispatch_necessary = true;

  _Per_CPU_Acquire( cpu_self, &lock_context );
  _Chain_Append_unprotected(
    &cpu_self->Threads_in_need_for_help,
    &thread->Scheduler.Help_node
  );
  _Per_CPU_Release( cpu_self, &lock_context );
}

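/*
 * Note (added for clarity): the help request is only recorded here.  Since
 * dispatch_necessary was set above, the requesting processor runs the
 * thread dispatcher, which is expected to drain Threads_in_need_for_help
 * and invoke the scheduler ask-for-help operation for each queued thread.
 */
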
typedef enum {
  SCHEDULER_SMP_DO_SCHEDULE,
  SCHEDULER_SMP_DO_NOT_SCHEDULE
} Scheduler_SMP_Action;

static inline Scheduler_SMP_Action _Scheduler_SMP_Try_to_schedule(
  Scheduler_Node          *node,
  Scheduler_Get_idle_node  get_idle_node,
  void                    *arg
)
{
  ISR_lock_Context        lock_context;
  Thread_Control         *owner;
  Thread_Scheduler_state  owner_state;
  int                     owner_sticky_level;

  owner = _Scheduler_Node_get_owner( node );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  _Thread_Scheduler_acquire_critical( owner, &lock_context );
  owner_state = owner->Scheduler.state;
  owner_sticky_level = node->sticky_level;

  if ( RTEMS_PREDICT_TRUE( owner_state == THREAD_SCHEDULER_READY ) ) {
    _Scheduler_SMP_Cancel_ask_for_help( owner );
    _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Scheduler_release_critical( owner, &lock_context );
    return SCHEDULER_SMP_DO_SCHEDULE;
  }

  _Thread_Scheduler_release_critical( owner, &lock_context );

  if (
    ( owner_state == THREAD_SCHEDULER_SCHEDULED && owner_sticky_level <= 1 ) ||
      owner_sticky_level == 0
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    return SCHEDULER_SMP_DO_NOT_SCHEDULE;
  }

  (void) _Scheduler_Use_idle_thread( node, get_idle_node, arg );

  return SCHEDULER_SMP_DO_SCHEDULE;
}

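/*
 * Summary (added for clarity): the node is scheduled if its owner is ready
 * (the common case), blocked if the owner is already scheduled elsewhere
 * with a sticky level of at most one or has a sticky level of zero, and
 * otherwise kept scheduled with an idle thread attached as a placeholder
 * for the sticky owner.
 */
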
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control  *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  _Assert( _ISR_Get_level() != 0 );

  if ( cpu == scheduled_cpu ) {
    _Thread_Set_CPU( scheduled_thread, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );

    return;
  }

  if (
    _Thread_Is_executing_on_a_processor( scheduled_thread ) &&
      _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu )
  ) {
    Thread_Control *heir = scheduled_cpu->heir;
    _Thread_Dispatch_update_heir( cpu_self, scheduled_cpu, scheduled_thread );
    _Thread_Set_CPU( heir, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, heir );

    return;
  }

  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}

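/*
 * Note (added for clarity): the lazy allocation above avoids thread
 * migrations where possible.  If the scheduled thread already sits on the
 * assigned processor, it simply becomes the heir there.  If it still
 * executes on another processor owned by this scheduler instance, the heir
 * threads of the two processors are swapped.  Only in the remaining case
 * is the thread migrated to the assigned processor.
 */
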
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Per_CPU_Control   *cpu
)
{
  Thread_Control  *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;

  _Thread_Set_CPU( scheduled_thread, cpu );
  _Thread_Dispatch_update_heir( cpu_self, cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Per_CPU_Control                  *cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, cpu );
}

static inline void _Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Thread_Control                   *victim_idle,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_owner;
  ISR_lock_Context  lock_context;
  Per_CPU_Control  *cpu;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  victim_owner = _Scheduler_Node_get_owner( victim );
  _Thread_Scheduler_acquire_critical( victim_owner, &lock_context );

  if ( RTEMS_PREDICT_TRUE( victim_idle == NULL ) ) {
    if ( victim_owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
      _Scheduler_Thread_change_state( victim_owner, THREAD_SCHEDULER_READY );

      if ( victim_owner->Scheduler.helping_nodes > 0 ) {
        _Scheduler_SMP_Request_ask_for_help( victim_owner );
      }
    }

    cpu = _Thread_Get_CPU( victim_owner );
  } else {
    cpu = _Thread_Get_CPU( victim_idle );
  }

  _Thread_Scheduler_release_critical( victim_owner, &lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    cpu,
    allocate_processor
  );
}

static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *lowest_scheduled;

  (void) filter;

  self = _Scheduler_SMP_Get_self( context );

  _Assert( !_Chain_Is_empty( &self->Scheduled ) );
  lowest_scheduled = (Scheduler_Node *) _Chain_Last( &self->Scheduled );

  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) ==
      _Chain_Tail( &self->Scheduled )
  );

  return lowest_scheduled;
}

static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control       *lowest_scheduled_idle;
  Scheduler_SMP_Action  action;

  lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
    lowest_scheduled,
    release_idle_node,
    context
  );

  ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

  action = _Scheduler_SMP_Try_to_schedule( node, get_idle_node, context );

  if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      lowest_scheduled_idle,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
  } else {
    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );

    if ( lowest_scheduled_idle != NULL ) {
      (void) _Scheduler_Use_idle_thread( lowest_scheduled, get_idle_node, context );
    }

    ( *move_from_ready_to_scheduled )( context, lowest_scheduled );
  }
}

static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Priority_Control                    insert_priority,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Move                  move_from_ready_to_scheduled,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor,
  Scheduler_Get_idle_node             get_idle_node,
  Scheduler_Release_idle_node         release_idle_node
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if (
    ( *order )(
      &insert_priority,
      &node->Node.Chain,
      &lowest_scheduled->Node.Chain
    )
  ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node,
      release_idle_node
    );
    needs_help = false;
  } else {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}

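/*
 * Note (added for clarity): a true return value means the node could only
 * be placed in the ready set.  Callers such as _Scheduler_SMP_Unblock()
 * below react to this by issuing an ask-for-help request on behalf of the
 * thread.
 */
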
static inline void _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control *node_idle;

  node_idle = _Scheduler_Release_idle_thread_if_necessary(
    node,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node       *highest_ready;
    Scheduler_SMP_Action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to
     * place it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0 && ( *order )(
        &insert_priority,
        &node->Node.Chain,
        &highest_ready->Node.Chain
      )
    ) {
      if ( node_idle != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          Per_CPU_Control *cpu;

          _Scheduler_SMP_Cancel_ask_for_help( owner );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
          cpu = _Thread_Get_CPU( node_idle );
          _Thread_Set_CPU( owner, cpu );
          _Thread_Scheduler_release_critical( owner, &lock_context );
          _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, owner );
        } else {
          Thread_Control *new_idle;

          _Thread_Scheduler_release_critical( owner, &lock_context );
          new_idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
          _Assert_Unused_variable_equals( new_idle, node_idle );
        }
      }

      ( *insert_scheduled )( context, node, insert_priority );

      return;
    }

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        node_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      ( *insert_ready )( context, node, insert_priority );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

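/*
 * Note (added for clarity): the loop above terminates because each
 * unsuccessful _Scheduler_SMP_Try_to_schedule() blocks the examined
 * highest ready node and extract_from_ready removes it from the ready
 * set, so the candidate set shrinks with every iteration.
 */
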
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}

static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *cpu,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  Scheduler_SMP_Action action;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_BLOCKED );
  ( *extract_from_scheduled )( context, victim );

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Thread_Control       *victim_idle;
  Scheduler_SMP_Action  action;

  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );
  victim_idle = _Scheduler_Release_idle_thread_if_necessary(
    victim,
    release_idle_node,
    context
  );

  while ( true ) {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_SMP_Try_to_schedule(
      highest_ready,
      get_idle_node,
      context
    );

    if ( RTEMS_PREDICT_TRUE( action == SCHEDULER_SMP_DO_SCHEDULE ) ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        victim_idle,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
      return;
    }

    _Assert( action == SCHEDULER_SMP_DO_NOT_SCHEDULE );
    ( *extract_from_ready )( context, highest_ready );
  }
}

static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  int                       sticky_level;
  ISR_lock_Context          lock_context;
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *cpu;

  sticky_level = node->sticky_level;
  --sticky_level;
  node->sticky_level = sticky_level;
  _Assert( sticky_level >= 0 );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );
  _Scheduler_SMP_Cancel_ask_for_help( thread );
  cpu = _Thread_Get_CPU( thread );
  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
  _Thread_Scheduler_release_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( sticky_level > 0 ) ) {
    if (
      node_state == SCHEDULER_SMP_NODE_SCHEDULED &&
        _Scheduler_Node_get_idle( node ) == NULL
    ) {
      Thread_Control *idle;

      idle = _Scheduler_Use_idle_thread( node, get_idle_node, context );
      _Thread_Set_CPU( idle, cpu );
      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, idle );
    }

    return;
  }

  _Assert( _Scheduler_Node_get_user( node ) == thread );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }
}

static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context           *context,
  Thread_Control              *thread,
  Scheduler_Node              *node,
  Scheduler_SMP_Update         update,
  Scheduler_SMP_Enqueue        enqueue,
  Scheduler_Release_idle_node  release_idle_node
)
{
  Scheduler_SMP_Node_state  node_state;
  Priority_Control          priority;

  _Assert( _Chain_Is_node_off_chain( &thread->Scheduler.Help_node ) );

  ++node->sticky_level;
  _Assert( node->sticky_level > 0 );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( RTEMS_PREDICT_FALSE( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) ) {
    _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Discard_idle_thread(
      thread,
      node,
      release_idle_node,
      context
    );

    return;
  }

  _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_READY );

  priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( priority );

  if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
    ( *update )( context, node, priority );
  }

  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    Priority_Control insert_priority;
    bool             needs_help;

    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    needs_help = ( *enqueue )( context, node, insert_priority );

    if ( needs_help && thread->Scheduler.helping_nodes > 0 ) {
      _Scheduler_SMP_Request_ask_for_help( thread );
    }
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_READY );
    _Assert( node->sticky_level > 0 );
    _Assert( node->idle == NULL );
    _Scheduler_SMP_Request_ask_for_help( thread );
  }
}

static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Update             update,
  Scheduler_SMP_Enqueue            enqueue,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled,
  Scheduler_SMP_Ask_for_help       ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}

static inline void _Scheduler_SMP_Yield(
  Scheduler_Context               *context,
  Thread_Control                  *thread,
  Scheduler_Node                  *node,
  Scheduler_SMP_Extract            extract_from_scheduled,
  Scheduler_SMP_Extract            extract_from_ready,
  Scheduler_SMP_Enqueue            enqueue,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  (void) thread;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    (void) ( *enqueue )( context, node, insert_priority );
  }
}

static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}

static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor,
  Scheduler_Release_idle_node         release_idle_node
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to
     * break the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if (
        ( *order )(
          &insert_priority,
          &node->Node.Chain,
          &lowest_scheduled->Node.Chain
        )
      ) {
        Thread_Control *lowest_scheduled_idle;

        _Scheduler_SMP_Cancel_ask_for_help( thread );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        lowest_scheduled_idle = _Scheduler_Release_idle_thread_if_necessary(
          lowest_scheduled,
          release_idle_node,
          context
        );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          lowest_scheduled_idle,
          allocate_processor
        );

        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
        ( *insert_scheduled )( context, node, insert_priority );

        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Scheduler_SMP_Cancel_ask_for_help( thread );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      _Scheduler_Discard_idle_thread(
        thread,
        node,
        release_idle_node,
        context
      );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}

static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}

static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *cpu;

    _Assert( thread == _Scheduler_Node_get_user( node ) );
    cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Assert( _Scheduler_Node_get_user( node ) == thread );
    _Assert( _Scheduler_Node_get_idle( node ) == NULL );

    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      cpu,
      extract_from_scheduled,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}

static inline void _Scheduler_SMP_Make_sticky(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Scheduler_Node          *node,
  Scheduler_SMP_Update     update,
  Scheduler_SMP_Enqueue    enqueue
)
{
  Scheduler_SMP_Node_state node_state;

  (void) the_thread;

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
    Scheduler_Context *context;
    Priority_Control   insert_priority;
    Priority_Control   priority;

    context = _Scheduler_Get_context( scheduler );
    priority = _Scheduler_Node_get_priority( node );
    priority = SCHEDULER_PRIORITY_PURIFY( priority );

    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, priority );
    }

    insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
    (void) ( *enqueue )( context, node, insert_priority );
  }
}

static inline void _Scheduler_SMP_Clean_sticky(
  const Scheduler_Control          *scheduler,
  Thread_Control                   *the_thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Scheduler_SMP_Node_state node_state;

  (void) the_thread;

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Thread_Control *idle;

    idle = _Scheduler_Node_get_idle( node );

    if ( idle != NULL ) {
      Scheduler_Context *context;

      context = _Scheduler_Get_context( scheduler );

      _Scheduler_Release_idle_thread( node, idle, release_idle_node, context );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        _Thread_Get_CPU( idle ),
        extract_from_scheduled,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor,
        get_idle_node
      );
    }
  }
}

static inline void _Scheduler_SMP_Do_start_idle(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Per_CPU_Control             *cpu,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_SMP_Node    *node;

  self = _Scheduler_SMP_Get_self( context );
  node = _Scheduler_SMP_Thread_get_node( idle );

  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( idle, cpu );
  ( *register_idle )( context, &node->Base, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
}

static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context               *context,
  Thread_Control                  *idle,
  Scheduler_SMP_Has_ready          has_ready,
  Scheduler_SMP_Enqueue_scheduled  enqueue_scheduled,
  Scheduler_SMP_Register_idle      register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );

  if ( ( *has_ready )( &self->Base ) ) {
    Priority_Control insert_priority;

    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
  }
}

static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context           *context,
  Per_CPU_Control             *cpu,
  Scheduler_SMP_Extract        extract_from_scheduled,
  Scheduler_SMP_Extract        extract_from_ready,
  Scheduler_SMP_Enqueue        enqueue,
  Scheduler_Get_idle_node      get_idle_node,
  Scheduler_Release_idle_node  release_idle_node
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  (void) extract_from_ready;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  ( *extract_from_scheduled )( &self->Base, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Thread_Control   *victim_idle;
    Scheduler_Node   *idle_node;
    Priority_Control  insert_priority;

    victim_idle = _Scheduler_Release_idle_thread_if_necessary(
      victim_node,
      release_idle_node,
      &self->Base
    );
    idle_node = ( *get_idle_node )( &self->Base );
    idle = _Scheduler_Node_get_owner( idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      victim_idle,
      _Scheduler_SMP_Allocate_processor_exact
    );

    _Assert( !_Chain_Is_empty( &self->Scheduled ) );
    insert_priority = _Scheduler_SMP_Node_priority( victim_node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue )( &self->Base, victim_node, insert_priority );
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
  }

  return idle;
}

static inline void _Scheduler_SMP_Set_affinity(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  void                             *arg,
  Scheduler_SMP_Set_affinity        set_affinity,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Enqueue             enqueue,
  Scheduler_SMP_Allocate_processor  allocate_processor,
  Scheduler_Get_idle_node           get_idle_node,
  Scheduler_Release_idle_node       release_idle_node
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  (void) thread;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    ( *extract_from_scheduled )( context, node );
    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
      context,
      node,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor,
      get_idle_node,
      release_idle_node
    );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    ( *set_affinity )( context, node, arg );
  }
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */