RTEMS 6.1-rc4
Loading...
Searching...
No Matches
schedulerimpl.h
Go to the documentation of this file.
1/* SPDX-License-Identifier: BSD-2-Clause */
2
12/*
13 * Copyright (C) 2010 Gedare Bloom.
14 * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
15 * Copyright (C) 2014, 2017 embedded brains GmbH & Co. KG
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
27 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
30 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
40#define _RTEMS_SCORE_SCHEDULERIMPL_H
41
#include <rtems/score/assert.h>
#include <rtems/score/priorityimpl.h>
#include <rtems/score/scheduler.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/threadimpl.h>
48
49#ifdef __cplusplus
50extern "C" {
51#endif
52
96
104static inline Scheduler_Context *_Scheduler_Get_context(
105 const Scheduler_Control *scheduler
106)
107{
108 return scheduler->context;
109}
110
118static inline const Scheduler_Control *_Scheduler_Get_by_CPU(
119 const Per_CPU_Control *cpu
120)
121{
122#if defined(RTEMS_SMP)
123 return cpu->Scheduler.control;
124#else
125 (void) cpu;
126 return &_Scheduler_Table[ 0 ];
127#endif
128}
129
138static inline void _Scheduler_Acquire_critical(
139 const Scheduler_Control *scheduler,
140 ISR_lock_Context *lock_context
141)
142{
143#if defined(RTEMS_SMP)
145
146 context = _Scheduler_Get_context( scheduler );
147 _ISR_lock_Acquire( &context->Lock, lock_context );
148#else
149 (void) scheduler;
150 (void) lock_context;
151#endif
152}
153
162static inline void _Scheduler_Release_critical(
163 const Scheduler_Control *scheduler,
164 ISR_lock_Context *lock_context
165)
166{
167#if defined(RTEMS_SMP)
169
170 context = _Scheduler_Get_context( scheduler );
171 _ISR_lock_Release( &context->Lock, lock_context );
172#else
173 (void) scheduler;
174 (void) lock_context;
175#endif
176}
177
#if defined(RTEMS_SMP)
/**
 * @brief Indicates if the scheduler supports the non-preempt mode.
 *
 * @param scheduler is the scheduler instance of interest.
 *
 * @return Returns true, if the scheduler supports the non-preempt mode,
 *   otherwise false.
 */
static inline bool _Scheduler_Is_non_preempt_mode_supported(
  const Scheduler_Control *scheduler
)
{
  bool is_supported;

  is_supported = scheduler->is_non_preempt_mode_supported;
  return is_supported;
}
#endif
195
204/*
205 * Passing the Scheduler_Control* to these functions allows for multiple
206 * scheduler's to exist simultaneously, which could be useful on an SMP
207 * system. Then remote Schedulers may be accessible. How to protect such
208 * accesses remains an open problem.
209 */
210
219static inline void _Scheduler_Schedule( Thread_Control *the_thread )
220{
221 const Scheduler_Control *scheduler;
222 ISR_lock_Context lock_context;
223
224 scheduler = _Thread_Scheduler_get_home( the_thread );
225 _Scheduler_Acquire_critical( scheduler, &lock_context );
226
227 ( *scheduler->Operations.schedule )( scheduler, the_thread );
228
229 _Scheduler_Release_critical( scheduler, &lock_context );
230}
231
240static inline void _Scheduler_Yield( Thread_Control *the_thread )
241{
242 const Scheduler_Control *scheduler;
243 ISR_lock_Context lock_context;
244
245 scheduler = _Thread_Scheduler_get_home( the_thread );
246 _Scheduler_Acquire_critical( scheduler, &lock_context );
247 ( *scheduler->Operations.yield )(
248 scheduler,
249 the_thread,
250 _Thread_Scheduler_get_home_node( the_thread )
251 );
252 _Scheduler_Release_critical( scheduler, &lock_context );
253}
254
/**
 * @brief Blocks the thread.
 *
 * In SMP configurations, the thread may have scheduler nodes in several
 * scheduler instances (helping protocol).  The home scheduler node (the first
 * node of the thread's scheduler node chain) receives the block operation;
 * every other node is withdrawn with a THREAD_SCHEDULER_BLOCKED state.
 *
 * @param[in, out] the_thread is the thread to block.
 */
static inline void _Scheduler_Block( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node *node;
  const Chain_Node *tail;
  Scheduler_Node *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context lock_context;

  /* The first node of this chain is the home scheduler node of the thread */
  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  /* Only the home scheduler instance gets the block operation */
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    scheduler_node
  );
  _Scheduler_Release_critical( scheduler, &lock_context );

  node = _Chain_Next( node );

  /* All remaining scheduler nodes are withdrawn, each under its own lock */
  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.withdraw_node )(
      scheduler,
      the_thread,
      scheduler_node,
      THREAD_SCHEDULER_BLOCKED
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
#else
  const Scheduler_Control *scheduler;

  /* Uniprocessor: exactly one scheduler node, no scheduler lock needed */
  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.block )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
316
327static inline void _Scheduler_Unblock( Thread_Control *the_thread )
328{
329 Scheduler_Node *scheduler_node;
330 const Scheduler_Control *scheduler;
331 ISR_lock_Context lock_context;
332
333#if defined(RTEMS_SMP)
334 scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
335 _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
336 );
337 scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
338#else
339 scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
340 scheduler = _Thread_Scheduler_get_home( the_thread );
341#endif
342
343 _Scheduler_Acquire_critical( scheduler, &lock_context );
344 ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
345 _Scheduler_Release_critical( scheduler, &lock_context );
346}
347
/**
 * @brief Propagates a priority change of the thread to all of its scheduler
 *   nodes.
 *
 * In SMP configurations, pending scheduler requests are processed first, then
 * every scheduler node of the thread receives the update priority operation
 * under the corresponding scheduler lock.  The thread's scheduler node chain
 * always contains at least one node, so the do/while loop is safe.
 *
 * @param[in, out] the_thread is the thread with a changed priority.
 */
static inline void _Scheduler_Update_priority( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  Chain_Node *node;
  const Chain_Node *tail;

  _Thread_Scheduler_process_requests( the_thread );

  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );

  do {
    Scheduler_Node *scheduler_node;
    const Scheduler_Control *scheduler;
    ISR_lock_Context lock_context;

    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    /* Each node is updated under its own scheduler instance lock */
    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  } while ( node != tail );
#else
  const Scheduler_Control *scheduler;

  scheduler = _Thread_Scheduler_get_home( the_thread );
  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    _Thread_Scheduler_get_home_node( the_thread )
  );
#endif
}
402
416static inline Priority_Control _Scheduler_Map_priority(
417 const Scheduler_Control *scheduler,
418 Priority_Control priority
419)
420{
421 return ( *scheduler->Operations.map_priority )( scheduler, priority );
422}
423
432static inline Priority_Control _Scheduler_Unmap_priority(
433 const Scheduler_Control *scheduler,
434 Priority_Control priority
435)
436{
437 return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
438}
439
453static inline void _Scheduler_Node_initialize(
454 const Scheduler_Control *scheduler,
455 Scheduler_Node *node,
456 Thread_Control *the_thread,
457 Priority_Control priority
458)
459{
460 ( *scheduler->Operations.node_initialize )(
461 scheduler,
462 node,
463 the_thread,
464 priority
465 );
466}
467
477static inline void _Scheduler_Node_destroy(
478 const Scheduler_Control *scheduler,
479 Scheduler_Node *node
480)
481{
482 ( *scheduler->Operations.node_destroy )( scheduler, node );
483}
484
494static inline void _Scheduler_Release_job(
495 Thread_Control *the_thread,
496 Priority_Node *priority_node,
497 uint64_t deadline,
498 Thread_queue_Context *queue_context
499)
500{
501 const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
502
503 _Thread_queue_Context_clear_priority_updates( queue_context );
504 ( *scheduler->Operations.release_job )(
505 scheduler,
506 the_thread,
507 priority_node,
508 deadline,
509 queue_context
510 );
511}
512
521static inline void _Scheduler_Cancel_job(
522 Thread_Control *the_thread,
523 Priority_Node *priority_node,
524 Thread_queue_Context *queue_context
525)
526{
527 const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
528
529 _Thread_queue_Context_clear_priority_updates( queue_context );
530 ( *scheduler->Operations.cancel_job )(
531 scheduler,
532 the_thread,
533 priority_node,
534 queue_context
535 );
536}
537
547static inline void _Scheduler_Start_idle(
548 const Scheduler_Control *scheduler,
549 Thread_Control *the_thread,
550 Per_CPU_Control *cpu
551)
552{
553 ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
554}
555
566static inline bool _Scheduler_Has_processor_ownership(
567 const Scheduler_Control *scheduler,
568 uint32_t cpu_index
569)
570{
571#if defined(RTEMS_SMP)
572 const Per_CPU_Control *cpu;
573 const Scheduler_Control *scheduler_of_cpu;
574
575 cpu = _Per_CPU_Get_by_index( cpu_index );
576 scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );
577
578 return scheduler_of_cpu == scheduler;
579#else
580 (void) scheduler;
581 (void) cpu_index;
582
583 return true;
584#endif
585}
586
594static inline const Processor_mask *_Scheduler_Get_processors(
595 const Scheduler_Control *scheduler
596)
597{
598#if defined(RTEMS_SMP)
599 return &_Scheduler_Get_context( scheduler )->Processors;
600#else
601 return &_Processor_mask_The_one_and_only;
602#endif
603}
604
617 Thread_Control *the_thread,
618 size_t cpusetsize,
619 cpu_set_t *cpuset
620);
621
635static inline Status_Control _Scheduler_default_Set_affinity_body(
636 const Scheduler_Control *scheduler,
637 Thread_Control *the_thread,
638 Scheduler_Node *node,
639 const Processor_mask *affinity
640)
641{
642 (void) scheduler;
643 (void) the_thread;
644 (void) node;
645
646 if ( !_Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() ) ) {
647 return STATUS_INVALID_NUMBER;
648 }
649
650 return STATUS_SUCCESSFUL;
651}
652
666 Thread_Control *the_thread,
667 size_t cpusetsize,
668 const cpu_set_t *cpuset
669);
670
678static inline uint32_t _Scheduler_Get_processor_count(
679 const Scheduler_Control *scheduler
680)
681{
682#if defined(RTEMS_SMP)
683 const Scheduler_Context *context = _Scheduler_Get_context( scheduler );
684
685 return _Processor_mask_Count( &context->Processors );
686#else
687 (void) scheduler;
688
689 return 1;
690#endif
691}
692
700static inline Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
701{
702 return _Objects_Build_id(
703 OBJECTS_FAKE_OBJECTS_API,
704 OBJECTS_FAKE_OBJECTS_SCHEDULERS,
706 (uint16_t) ( scheduler_index + 1 )
707 );
708}
709
717static inline uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
718{
719 uint32_t minimum_id = _Scheduler_Build_id( 0 );
720
721 return id - minimum_id;
722}
723
731static inline const Scheduler_Control *_Scheduler_Get_by_id(
732 Objects_Id id
733)
734{
735 uint32_t index;
736
737 index = _Scheduler_Get_index_by_id( id );
738
739 if ( index >= _Scheduler_Count ) {
740 return NULL;
741 }
742
743 return &_Scheduler_Table[ index ];
744}
745
753static inline uint32_t _Scheduler_Get_index(
754 const Scheduler_Control *scheduler
755)
756{
757 return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
758}
759
#if defined(RTEMS_SMP)
/**
 * @brief This handler gets an idle scheduler node to use.
 *
 * @param arg is the handler argument provided by the caller.
 *
 * @return Returns an idle scheduler node.
 */
typedef Scheduler_Node *( *Scheduler_Get_idle_node )( void *arg );

/**
 * @brief This handler releases an idle scheduler node obtained by a
 *   Scheduler_Get_idle_node handler.
 *
 * @param node is the idle scheduler node to release.
 * @param arg is the handler argument provided by the caller.
 */
typedef void ( *Scheduler_Release_idle_node )(
  Scheduler_Node *node,
  void *arg
);
783
790static inline void _Scheduler_Thread_change_state(
791 Thread_Control *the_thread,
792 Thread_Scheduler_state new_state
793)
794{
795 _Assert(
796 _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
797 || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
798 || !_System_state_Is_up( _System_state_Get() )
799 );
800
801 the_thread->Scheduler.state = new_state;
802}
803
813static inline Thread_Control *_Scheduler_Use_idle_thread(
814 Scheduler_Node *node,
815 Scheduler_Get_idle_node get_idle_node,
816 void *arg
817)
818{
819 Scheduler_Node *idle_node;
820 Thread_Control *idle;
821
822 idle_node = ( *get_idle_node )( arg );
823 idle = _Scheduler_Node_get_owner( idle_node );
824 _Assert( idle->is_idle );
825 _Scheduler_Node_set_idle_user( node, idle );
826
827 return idle;
828}
829
841static inline void _Scheduler_Release_idle_thread(
842 Scheduler_Node *node,
843 const Thread_Control *idle,
844 Scheduler_Release_idle_node release_idle_node,
845 void *arg
846)
847{
848 Thread_Control *owner;
849 Scheduler_Node *idle_node;
850
851 owner = _Scheduler_Node_get_owner( node );
852 _Assert( _Scheduler_Node_get_user( node ) == idle );
853 _Scheduler_Node_set_user( node, owner );
854 node->idle = NULL;
855 idle_node = _Thread_Scheduler_get_home_node( idle );
856 ( *release_idle_node )( idle_node, arg );
857}
858
873static inline Thread_Control *_Scheduler_Release_idle_thread_if_necessary(
874 Scheduler_Node *node,
875 Scheduler_Release_idle_node release_idle_node,
876 void *arg
877)
878{
879 Thread_Control *idle;
880
881 idle = _Scheduler_Node_get_idle( node );
882
883 if ( idle != NULL ) {
884 _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
885 }
886
887 return idle;
888}
889
901static inline void _Scheduler_Discard_idle_thread(
902 Thread_Control *the_thread,
903 Scheduler_Node *node,
904 Scheduler_Release_idle_node release_idle_node,
905 void *arg
906)
907{
908 Thread_Control *idle;
909 Per_CPU_Control *cpu;
910
911 idle = _Scheduler_Node_get_idle( node );
912 _Scheduler_Release_idle_thread( node, idle, release_idle_node, arg );
913
914 cpu = _Thread_Get_CPU( idle );
915 _Thread_Set_CPU( the_thread, cpu );
916 _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
917}
918#endif
919
931static inline Status_Control _Scheduler_Set(
932 const Scheduler_Control *new_scheduler,
933 Thread_Control *the_thread,
934 Priority_Control priority
935)
936{
937 Scheduler_Node *new_scheduler_node;
938 Scheduler_Node *old_scheduler_node;
939#if defined(RTEMS_SMP)
940 ISR_lock_Context lock_context;
941 const Scheduler_Control *old_scheduler;
942
943#endif
944
945#if defined(RTEMS_SCORE_THREAD_HAS_SCHEDULER_CHANGE_INHIBITORS)
946 if ( the_thread->is_scheduler_change_inhibited ) {
947 return STATUS_RESOURCE_IN_USE;
948 }
949#endif
950
951 if ( the_thread->Wait.queue != NULL ) {
952 return STATUS_RESOURCE_IN_USE;
953 }
954
955 old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
956 _Priority_Plain_extract(
957 &old_scheduler_node->Wait.Priority,
958 &the_thread->Real_priority
959 );
960
961 if (
962 !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
963#if defined(RTEMS_SMP)
964 || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
965 || the_thread->Scheduler.pin_level != 0
966#endif
967 ) {
968 _Priority_Plain_insert(
969 &old_scheduler_node->Wait.Priority,
970 &the_thread->Real_priority,
971 the_thread->Real_priority.priority
972 );
973 return STATUS_RESOURCE_IN_USE;
974 }
975
976#if defined(RTEMS_SMP)
977 old_scheduler = _Thread_Scheduler_get_home( the_thread );
978 new_scheduler_node = _Thread_Scheduler_get_node_by_index(
979 the_thread,
980 _Scheduler_Get_index( new_scheduler )
981 );
982
983 _Scheduler_Acquire_critical( new_scheduler, &lock_context );
984
985 if (
986 _Scheduler_Get_processor_count( new_scheduler ) == 0
987 || ( *new_scheduler->Operations.set_affinity )(
988 new_scheduler,
989 the_thread,
990 new_scheduler_node,
991 &the_thread->Scheduler.Affinity
992 ) != STATUS_SUCCESSFUL
993 ) {
994 _Scheduler_Release_critical( new_scheduler, &lock_context );
995 _Priority_Plain_insert(
996 &old_scheduler_node->Wait.Priority,
997 &the_thread->Real_priority,
998 the_thread->Real_priority.priority
999 );
1000 return STATUS_UNSATISFIED;
1001 }
1002
1003 _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
1004 the_thread->Scheduler.home_scheduler = new_scheduler;
1005
1006 _Scheduler_Release_critical( new_scheduler, &lock_context );
1007
1008 _Thread_Scheduler_process_requests( the_thread );
1009#else
1010 new_scheduler_node = old_scheduler_node;
1011#endif
1012
1013 the_thread->Start.initial_priority = priority;
1014 _Priority_Node_set_priority( &the_thread->Real_priority, priority );
1015 _Priority_Initialize_one(
1016 &new_scheduler_node->Wait.Priority,
1017 &the_thread->Real_priority
1018 );
1019
1020#if defined(RTEMS_SMP)
1021 if ( old_scheduler != new_scheduler ) {
1022 States_Control current_state;
1023
1024 current_state = the_thread->current_state;
1025
1026 if ( _States_Is_ready( current_state ) ) {
1027 _Scheduler_Block( the_thread );
1028 }
1029
1030 _Assert( old_scheduler_node->sticky_level == 0 );
1031 _Assert( new_scheduler_node->sticky_level == 0 );
1032
1033 _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
1034 _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1035 _Chain_Initialize_one(
1036 &the_thread->Scheduler.Wait_nodes,
1037 &new_scheduler_node->Thread.Wait_node
1038 );
1039 _Chain_Extract_unprotected(
1040 &old_scheduler_node->Thread.Scheduler_node.Chain
1041 );
1042 _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
1043 _Chain_Initialize_one(
1044 &the_thread->Scheduler.Scheduler_nodes,
1045 &new_scheduler_node->Thread.Scheduler_node.Chain
1046 );
1047
1048 _Scheduler_Node_set_priority(
1049 new_scheduler_node,
1050 priority,
1052 );
1053
1054 if ( _States_Is_ready( current_state ) ) {
1055 _Scheduler_Unblock( the_thread );
1056 }
1057
1058 return STATUS_SUCCESSFUL;
1059 }
1060#endif
1061
1062 _Scheduler_Node_set_priority(
1063 new_scheduler_node,
1064 priority,
1066 );
1067 _Scheduler_Update_priority( the_thread );
1068 return STATUS_SUCCESSFUL;
1069}
1070
1073#ifdef __cplusplus
1074}
1075#endif
1076
1077#endif
1078/* end of include file */
This header file provides the interfaces of the Assert Handler.
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG and static analysis runs.
Definition: assert.h:96
#define _ISR_lock_Release(_lock, _context)
Releases an ISR lock inside an ISR disabled section.
Definition: isrlock.h:282
#define _ISR_lock_Acquire(_lock, _context)
Acquires an ISR lock inside an ISR disabled section.
Definition: isrlock.h:259
uint32_t Objects_Id
Definition: object.h:101
#define _Objects_Build_id(the_api, the_class, node, index)
Builds an object ID from its components.
Definition: object.h:338
#define _Objects_Local_node
The local MPCI node number.
Definition: object.h:368
uint64_t Priority_Control
The thread priority control.
Definition: priority.h:91
@ PRIORITY_GROUP_LAST
Priority group last option requests that the priority node is inserted as the last node into its prio...
Definition: priorityimpl.h:73
void _Scheduler_Handler_initialization(void)
Initializes the scheduler to the policy chosen by the user.
Definition: scheduler.c:44
Status_Control _Scheduler_Get_affinity(Thread_Control *the_thread, size_t cpusetsize, cpu_set_t *cpuset)
Copies the thread's scheduler's affinity to the given cpuset.
Definition: schedulergetaffinity.c:43
#define _Scheduler_Count
This constant contains the count of configured schedulers.
Definition: scheduler.h:395
const Scheduler_Control _Scheduler_Table[]
This table contains the configured schedulers.
Status_Control _Scheduler_Set_affinity(Thread_Control *the_thread, size_t cpusetsize, const cpu_set_t *cpuset)
Sets the thread's scheduler's affinity.
Definition: schedulersetaffinity.c:43
uint32_t States_Control
Definition: states.h:65
Status_Control
Status codes.
Definition: status.h:111
rtems_termios_device_context * context
Definition: console-config.c:62
This header file provides interfaces of the Priority Handler which are only used by the implementatio...
This header file provides interfaces of the Scheduler Handler which are used by the implementation an...
This header file provides the interfaces of the Operation Status Support.
This header file provides interfaces of the SMP Support which are only used by the implementation.
This structure represents a chain node.
Definition: chain.h:78
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:94
Per CPU Core Structure.
Definition: percpu.h:384
The priority node to build up a priority aggregation.
Definition: priority.h:112
Priority_Control priority
The priority value of this node.
Definition: priority.h:124
Scheduler context.
Definition: scheduler.h:318
Scheduler node for per-thread data.
Definition: schedulernode.h:94
struct Scheduler_Node::@4406 Wait
Thread wait support block.
void(* yield)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:70
Priority_Control(* unmap_priority)(const Scheduler_Control *, Priority_Control)
Definition: scheduler.h:104
void(* unblock)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:84
Priority_Control(* map_priority)(const Scheduler_Control *, Priority_Control)
Definition: scheduler.h:98
void(* cancel_job)(const Scheduler_Control *, Thread_Control *, Priority_Node *, Thread_queue_Context *)
Definition: scheduler.h:287
void(* block)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:77
void(* release_job)(const Scheduler_Control *, Thread_Control *, Priority_Node *, uint64_t, Thread_queue_Context *)
Definition: scheduler.h:278
void(* node_destroy)(const Scheduler_Control *, Scheduler_Node *)
Definition: scheduler.h:275
void(* node_initialize)(const Scheduler_Control *, Scheduler_Node *, Thread_Control *, Priority_Control)
Definition: scheduler.h:267
void(* update_priority)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:91
void(* schedule)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:67
void(* start_idle)(const Scheduler_Control *, Thread_Control *, struct Per_CPU_Control *)
Definition: scheduler.h:295
Priority_Control initial_priority
Definition: thread.h:249
Thread_queue_Queue * queue
The current thread queue.
Definition: thread.h:550
Thread queue context for the thread queue methods.
Definition: threadq.h:216
Scheduler control.
Definition: scheduler.h:337
Scheduler_Context * context
Reference to a statically allocated scheduler context.
Definition: scheduler.h:341
Scheduler_Operations Operations
The scheduler operations.
Definition: scheduler.h:346
Definition: thread.h:837
Thread_Wait_information Wait
Definition: thread.h:877
Priority_Node Real_priority
The base priority of this thread in its home scheduler instance.
Definition: thread.h:864
States_Control current_state
Definition: thread.h:859
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:874
Thread_Start_information Start
Definition: thread.h:956
bool is_idle
Definition: thread.h:908
This header file provides interfaces of the Thread Handler which are only used by the implementation.