RTEMS
scheduleredfsmp.c
Go to the documentation of this file.
1 
9 /*
10  * Copyright (c) 2017 embedded brains GmbH.
11  *
12  * The license and distribution terms for this file may be
13  * found in the file LICENSE in this distribution or at
14  * http://www.rtems.org/license/LICENSE.
15  */
16 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/scheduleredfsmp.h>
#include <rtems/score/schedulersmpimpl.h>

23 
24 static inline Scheduler_EDF_SMP_Context *
25 _Scheduler_EDF_SMP_Get_context( const Scheduler_Control *scheduler )
26 {
27  return (Scheduler_EDF_SMP_Context *) _Scheduler_Get_context( scheduler );
28 }
29 
30 static inline Scheduler_EDF_SMP_Context *
31 _Scheduler_EDF_SMP_Get_self( Scheduler_Context *context )
32 {
33  return (Scheduler_EDF_SMP_Context *) context;
34 }
35 
36 static inline Scheduler_EDF_SMP_Node *
37 _Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
38 {
39  return (Scheduler_EDF_SMP_Node *) node;
40 }
41 
42 static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
43  const void *left,
44  const RBTree_Node *right
45 )
46 {
47  const Priority_Control *the_left;
48  const Scheduler_SMP_Node *the_right;
49  Priority_Control prio_left;
50  Priority_Control prio_right;
51 
52  the_left = left;
53  the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );
54 
55  prio_left = *the_left;
56  prio_right = the_right->priority;
57 
58  return prio_left <= prio_right;
59 }
60 
62 {
64  _Scheduler_EDF_SMP_Get_context( scheduler );
65 
66  _Scheduler_SMP_Initialize( &self->Base );
67  _Chain_Initialize_empty( &self->Affine_queues );
68  /* The ready queues are zero initialized and thus empty */
69 }
70 
72  const Scheduler_Control *scheduler,
73  Scheduler_Node *node,
74  Thread_Control *the_thread,
75  Priority_Control priority
76 )
77 {
78  Scheduler_SMP_Node *smp_node;
79 
80  smp_node = _Scheduler_SMP_Node_downcast( node );
81  _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
82 }
83 
84 static inline void _Scheduler_EDF_SMP_Do_update(
85  Scheduler_Context *context,
86  Scheduler_Node *node,
87  Priority_Control new_priority
88 )
89 {
90  Scheduler_SMP_Node *smp_node;
91 
92  (void) context;
93 
94  smp_node = _Scheduler_SMP_Node_downcast( node );
95  _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
96 }
97 
98 static inline bool _Scheduler_EDF_SMP_Has_ready( Scheduler_Context *context )
99 {
100  Scheduler_EDF_SMP_Context *self = _Scheduler_EDF_SMP_Get_self( context );
101 
102  return !_RBTree_Is_empty( &self->Ready[ 0 ].Queue );
103 }
104 
105 static inline bool _Scheduler_EDF_SMP_Overall_less(
106  const Scheduler_EDF_SMP_Node *left,
107  const Scheduler_EDF_SMP_Node *right
108 )
109 {
110  Priority_Control lp;
111  Priority_Control rp;
112 
113  lp = left->Base.priority;
114  rp = right->Base.priority;
115 
116  return lp < rp || (lp == rp && left->generation < right->generation );
117 }
118 
119 static inline Scheduler_EDF_SMP_Node *
120 _Scheduler_EDF_SMP_Challenge_highest_ready(
122  Scheduler_EDF_SMP_Node *highest_ready,
123  RBTree_Control *ready_queue
124 )
125 {
126  Scheduler_EDF_SMP_Node *other;
127 
128  other = (Scheduler_EDF_SMP_Node *) _RBTree_Minimum( ready_queue );
129  _Assert( other != NULL );
130 
131  if ( _Scheduler_EDF_SMP_Overall_less( other, highest_ready ) ) {
132  return other;
133  }
134 
135  return highest_ready;
136 }
137 
138 static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
139  Scheduler_Context *context,
140  Scheduler_Node *filter
141 )
142 {
144  Scheduler_EDF_SMP_Node *highest_ready;
146  uint8_t rqi;
147  const Chain_Node *tail;
148  Chain_Node *next;
149 
150  self = _Scheduler_EDF_SMP_Get_self( context );
151  highest_ready = (Scheduler_EDF_SMP_Node *)
152  _RBTree_Minimum( &self->Ready[ 0 ].Queue );
153  _Assert( highest_ready != NULL );
154 
155  /*
156  * The filter node is a scheduled node which is no longer on the scheduled
157  * chain. In case this is an affine thread, then we have to check the
158  * corresponding affine ready queue.
159  */
160 
161  node = (Scheduler_EDF_SMP_Node *) filter;
162  rqi = node->ready_queue_index;
163 
164  if ( rqi != 0 && !_RBTree_Is_empty( &self->Ready[ rqi ].Queue ) ) {
165  highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
166  self,
167  highest_ready,
168  &self->Ready[ rqi ].Queue
169  );
170  }
171 
172  tail = _Chain_Immutable_tail( &self->Affine_queues );
173  next = _Chain_First( &self->Affine_queues );
174 
175  while ( next != tail ) {
176  Scheduler_EDF_SMP_Ready_queue *ready_queue;
177 
178  ready_queue = (Scheduler_EDF_SMP_Ready_queue *) next;
179  highest_ready = _Scheduler_EDF_SMP_Challenge_highest_ready(
180  self,
181  highest_ready,
182  &ready_queue->Queue
183  );
184 
185  next = _Chain_Next( next );
186  }
187 
188  return &highest_ready->Base.Base;
189 }
190 
191 static inline void _Scheduler_EDF_SMP_Set_scheduled(
193  Scheduler_EDF_SMP_Node *scheduled,
194  const Per_CPU_Control *cpu
195 )
196 {
197  self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
198 }
199 
200 static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
201  const Scheduler_EDF_SMP_Context *self,
202  uint8_t rqi
203 )
204 {
205  return self->Ready[ rqi ].scheduled;
206 }
207 
208 static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
209  Scheduler_Context *context,
210  Scheduler_Node *filter_base
211 )
212 {
213  Scheduler_EDF_SMP_Node *filter;
214  uint8_t rqi;
215 
216  filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
217  rqi = filter->ready_queue_index;
218 
219  if ( rqi != 0 ) {
222 
223  self = _Scheduler_EDF_SMP_Get_self( context );
224  node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
225 
226  if ( node->ready_queue_index > 0 ) {
227  _Assert( node->ready_queue_index == rqi );
228  return &node->Base.Base;
229  }
230  }
231 
232  return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
233 }
234 
235 static inline void _Scheduler_EDF_SMP_Insert_ready(
236  Scheduler_Context *context,
237  Scheduler_Node *node_base,
238  Priority_Control insert_priority
239 )
240 {
243  uint8_t rqi;
244  Scheduler_EDF_SMP_Ready_queue *ready_queue;
245  int generation_index;
246  int increment;
247  int64_t generation;
248 
249  self = _Scheduler_EDF_SMP_Get_self( context );
250  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
251  rqi = node->ready_queue_index;
252  generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
253  increment = ( generation_index << 1 ) - 1;
254  ready_queue = &self->Ready[ rqi ];
255 
256  generation = self->generations[ generation_index ];
257  node->generation = generation;
258  self->generations[ generation_index ] = generation + increment;
259 
260  _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
262  &ready_queue->Queue,
263  &node->Base.Base.Node.RBTree,
264  &insert_priority,
265  _Scheduler_EDF_SMP_Priority_less_equal
266  );
267 
268  if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
269  Scheduler_EDF_SMP_Node *scheduled;
270 
271  scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
272 
273  if ( scheduled->ready_queue_index == 0 ) {
274  _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
275  }
276  }
277 }
278 
279 static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
280  Scheduler_Context *context,
281  Scheduler_Node *node_to_extract
282 )
283 {
286  uint8_t rqi;
287  Scheduler_EDF_SMP_Ready_queue *ready_queue;
288 
289  self = _Scheduler_EDF_SMP_Get_self( context );
290  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
291 
292  _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
293 
294  rqi = node->ready_queue_index;
295  ready_queue = &self->Ready[ rqi ];
296 
297  if ( rqi != 0 && !_RBTree_Is_empty( &ready_queue->Queue ) ) {
298  _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
299  }
300 }
301 
302 static inline void _Scheduler_EDF_SMP_Extract_from_ready(
303  Scheduler_Context *context,
304  Scheduler_Node *node_to_extract
305 )
306 {
309  uint8_t rqi;
310  Scheduler_EDF_SMP_Ready_queue *ready_queue;
311 
312  self = _Scheduler_EDF_SMP_Get_self( context );
313  node = _Scheduler_EDF_SMP_Node_downcast( node_to_extract );
314  rqi = node->ready_queue_index;
315  ready_queue = &self->Ready[ rqi ];
316 
317  _RBTree_Extract( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
318  _Chain_Initialize_node( &node->Base.Base.Node.Chain );
319 
320  if (
321  rqi != 0
322  && _RBTree_Is_empty( &ready_queue->Queue )
323  && !_Chain_Is_node_off_chain( &ready_queue->Node )
324  ) {
325  _Chain_Extract_unprotected( &ready_queue->Node );
326  _Chain_Set_off_chain( &ready_queue->Node );
327  }
328 }
329 
330 static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
331  Scheduler_Context *context,
332  Scheduler_Node *scheduled_to_ready
333 )
334 {
335  Priority_Control insert_priority;
336 
337  _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
338  insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
339  _Scheduler_EDF_SMP_Insert_ready(
340  context,
341  scheduled_to_ready,
342  insert_priority
343  );
344 }
345 
346 static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
347  Scheduler_Context *context,
348  Scheduler_Node *ready_to_scheduled
349 )
350 {
351  Priority_Control insert_priority;
352 
353  _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
354  insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
355  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
357  context,
358  ready_to_scheduled,
359  insert_priority
360  );
361 }
362 
363 static inline void _Scheduler_EDF_SMP_Allocate_processor(
364  Scheduler_Context *context,
365  Scheduler_Node *scheduled_base,
366  Scheduler_Node *victim_base,
367  Per_CPU_Control *victim_cpu
368 )
369 {
371  Scheduler_EDF_SMP_Node *scheduled;
372  uint8_t rqi;
373 
374  (void) victim_base;
375  self = _Scheduler_EDF_SMP_Get_self( context );
376  scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
377  rqi = scheduled->ready_queue_index;
378 
379  if ( rqi != 0 ) {
380  Scheduler_EDF_SMP_Ready_queue *ready_queue;
381  Per_CPU_Control *desired_cpu;
382 
383  ready_queue = &self->Ready[ rqi ];
384 
385  if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
386  _Chain_Extract_unprotected( &ready_queue->Node );
387  _Chain_Set_off_chain( &ready_queue->Node );
388  }
389 
390  desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );
391 
392  if ( victim_cpu != desired_cpu ) {
394 
395  node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
396  _Assert( node->ready_queue_index == 0 );
397  _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
399  context,
400  &node->Base.Base,
401  NULL,
402  victim_cpu
403  );
404  victim_cpu = desired_cpu;
405  }
406  }
407 
408  _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
410  context,
411  &scheduled->Base.Base,
412  NULL,
413  victim_cpu
414  );
415 }
416 
418  const Scheduler_Control *scheduler,
419  Thread_Control *thread,
420  Scheduler_Node *node
421 )
422 {
423  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
424 
426  context,
427  thread,
428  node,
429  _Scheduler_EDF_SMP_Extract_from_scheduled,
430  _Scheduler_EDF_SMP_Extract_from_ready,
431  _Scheduler_EDF_SMP_Get_highest_ready,
432  _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
433  _Scheduler_EDF_SMP_Allocate_processor
434  );
435 }
436 
437 static inline bool _Scheduler_EDF_SMP_Enqueue(
438  Scheduler_Context *context,
439  Scheduler_Node *node,
440  Priority_Control insert_priority
441 )
442 {
443  return _Scheduler_SMP_Enqueue(
444  context,
445  node,
446  insert_priority,
448  _Scheduler_EDF_SMP_Insert_ready,
450  _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
451  _Scheduler_EDF_SMP_Get_lowest_scheduled,
452  _Scheduler_EDF_SMP_Allocate_processor
453  );
454 }
455 
456 static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled(
457  Scheduler_Context *context,
458  Scheduler_Node *node,
459  Priority_Control insert_priority
460 )
461 {
463  context,
464  node,
465  insert_priority,
467  _Scheduler_EDF_SMP_Extract_from_ready,
468  _Scheduler_EDF_SMP_Get_highest_ready,
469  _Scheduler_EDF_SMP_Insert_ready,
471  _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
472  _Scheduler_EDF_SMP_Allocate_processor
473  );
474 }
475 
477  const Scheduler_Control *scheduler,
478  Thread_Control *thread,
479  Scheduler_Node *node
480 )
481 {
482  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
483 
485  context,
486  thread,
487  node,
488  _Scheduler_EDF_SMP_Do_update,
489  _Scheduler_EDF_SMP_Enqueue
490  );
491 }
492 
493 static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
494  Scheduler_Context *context,
495  Thread_Control *the_thread,
496  Scheduler_Node *node
497 )
498 {
500  context,
501  the_thread,
502  node,
504  _Scheduler_EDF_SMP_Insert_ready,
506  _Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
507  _Scheduler_EDF_SMP_Get_lowest_scheduled,
508  _Scheduler_EDF_SMP_Allocate_processor
509  );
510 }
511 
513  const Scheduler_Control *scheduler,
514  Thread_Control *thread,
515  Scheduler_Node *node
516 )
517 {
518  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
519 
521  context,
522  thread,
523  node,
524  _Scheduler_EDF_SMP_Extract_from_ready,
525  _Scheduler_EDF_SMP_Do_update,
526  _Scheduler_EDF_SMP_Enqueue,
527  _Scheduler_EDF_SMP_Enqueue_scheduled,
528  _Scheduler_EDF_SMP_Do_ask_for_help
529  );
530 }
531 
533  const Scheduler_Control *scheduler,
534  Thread_Control *the_thread,
535  Scheduler_Node *node
536 )
537 {
538  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
539 
540  return _Scheduler_EDF_SMP_Do_ask_for_help( context, the_thread, node );
541 }
542 
544  const Scheduler_Control *scheduler,
545  Thread_Control *the_thread,
546  Scheduler_Node *node
547 )
548 {
549  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
550 
552  context,
553  the_thread,
554  node,
555  _Scheduler_EDF_SMP_Extract_from_ready
556  );
557 }
558 
560  const Scheduler_Control *scheduler,
561  Thread_Control *the_thread,
562  Scheduler_Node *node,
563  Thread_Scheduler_state next_state
564 )
565 {
566  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
567 
569  context,
570  the_thread,
571  node,
572  next_state,
573  _Scheduler_EDF_SMP_Extract_from_ready,
574  _Scheduler_EDF_SMP_Get_highest_ready,
575  _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
576  _Scheduler_EDF_SMP_Allocate_processor
577  );
578 }
579 
580 static inline void _Scheduler_EDF_SMP_Register_idle(
581  Scheduler_Context *context,
582  Scheduler_Node *idle_base,
583  Per_CPU_Control *cpu
584 )
585 {
588 
589  self = _Scheduler_EDF_SMP_Get_self( context );
590  idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
591  _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
592 }
593 
595  const Scheduler_Control *scheduler,
596  Thread_Control *idle
597 )
598 {
599  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
600 
602  context,
603  idle,
604  _Scheduler_EDF_SMP_Has_ready,
605  _Scheduler_EDF_SMP_Enqueue_scheduled,
606  _Scheduler_EDF_SMP_Register_idle
607  );
608 }
609 
611  const Scheduler_Control *scheduler,
612  Per_CPU_Control *cpu
613 )
614 {
615  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
616 
618  context,
619  cpu,
620  _Scheduler_EDF_SMP_Extract_from_ready,
621  _Scheduler_EDF_SMP_Enqueue
622  );
623 }
624 
626  const Scheduler_Control *scheduler,
627  Thread_Control *thread,
628  Scheduler_Node *node
629 )
630 {
631  Scheduler_Context *context = _Scheduler_Get_context( scheduler );
632 
634  context,
635  thread,
636  node,
637  _Scheduler_EDF_SMP_Extract_from_ready,
638  _Scheduler_EDF_SMP_Enqueue,
639  _Scheduler_EDF_SMP_Enqueue_scheduled
640  );
641 }
642 
643 static inline void _Scheduler_EDF_SMP_Do_set_affinity(
644  Scheduler_Context *context,
645  Scheduler_Node *node_base,
646  void *arg
647 )
648 {
650  const uint8_t *rqi;
651 
652  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
653  rqi = arg;
654  node->ready_queue_index = *rqi;
655 }
656 
658  const Scheduler_Control *scheduler,
659  Thread_Control *idle,
660  Per_CPU_Control *cpu
661 )
662 {
663  Scheduler_Context *context;
664 
665  context = _Scheduler_Get_context( scheduler );
666 
668  context,
669  idle,
670  cpu,
671  _Scheduler_EDF_SMP_Register_idle
672  );
673 }
674 
676  const Scheduler_Control *scheduler,
677  Thread_Control *thread,
678  Scheduler_Node *node_base,
679  struct Per_CPU_Control *cpu
680 )
681 {
683  uint8_t rqi;
684 
685  (void) scheduler;
686  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
687  rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;
688 
689  _Assert(
691  );
692 
693  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
694  node->ready_queue_index = rqi;
695  node->pinning_ready_queue_index = rqi;
696 }
697 
699  const Scheduler_Control *scheduler,
700  Thread_Control *thread,
701  Scheduler_Node *node_base,
702  struct Per_CPU_Control *cpu
703 )
704 {
706 
707  (void) scheduler;
708  (void) cpu;
709  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
710 
711  _Assert(
713  );
714 
716  node->pinning_ready_queue_index = 0;
717 }
718 
720  const Scheduler_Control *scheduler,
721  Thread_Control *thread,
722  Scheduler_Node *node_base,
723  const Processor_mask *affinity
724 )
725 {
726  Scheduler_Context *context;
728  Processor_mask local_affinity;
729  uint8_t rqi;
730 
731  context = _Scheduler_Get_context( scheduler );
732  _Processor_mask_And( &local_affinity, &context->Processors, affinity );
733 
734  if ( _Processor_mask_Is_zero( &local_affinity ) ) {
735  return false;
736  }
737 
738  if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
739  rqi = 0;
740  } else {
741  rqi = _Processor_mask_Find_last_set( &local_affinity );
742  }
743 
744  node = _Scheduler_EDF_SMP_Node_downcast( node_base );
745  node->affinity_ready_queue_index = rqi;
746 
747  if ( node->pinning_ready_queue_index == 0 ) {
749  context,
750  thread,
751  node_base,
752  &rqi,
753  _Scheduler_EDF_SMP_Do_set_affinity,
754  _Scheduler_EDF_SMP_Extract_from_ready,
755  _Scheduler_EDF_SMP_Get_highest_ready,
756  _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
757  _Scheduler_EDF_SMP_Enqueue,
758  _Scheduler_EDF_SMP_Allocate_processor
759  );
760  }
761 
762  return true;
763 }
#define SCHEDULER_PRIORITY_IS_APPEND(priority)
Returns true, if the item should be appended to its priority group, otherwise returns false and the item should be prepended to its priority group.
uint8_t ready_queue_index
The ready queue index depending on the processor affinity and pinning of the thread.
uint8_t pinning_ready_queue_index
Ready queue index according to thread pinning.
RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_zero(const Processor_mask *mask)
Checks if the mask is zero, also considers CPU_MAXIMUM_PROCESSORS.
Definition: processormask.h:74
static __inline__ Chain_Node * _Chain_First(const Chain_Control *the_chain)
Returns pointer to chain's first node.
Definition: chainimpl.h:260
static void _Scheduler_SMP_Add_processor(Scheduler_Context *context, Thread_Control *idle, Scheduler_SMP_Has_ready has_ready, Scheduler_SMP_Enqueue enqueue_scheduled, Scheduler_SMP_Register_idle register_idle)
Adds the idle thread to the processor.
RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_equal(const Processor_mask *a, const Processor_mask *b)
Checks if the processor sets a and b are equal.
uint64_t Priority_Control
The thread priority control.
Definition: priority.h:70
static __inline__ bool _RBTree_Insert_inline(RBTree_Control *the_rbtree, RBTree_Node *the_node, const void *key, bool(*less)(const void *, const RBTree_Node *))
Inserts the node into the red-black tree.
Definition: rbtree.h:508
void _Scheduler_EDF_SMP_Initialize(const Scheduler_Control *scheduler)
Initializes the context of the scheduler control.
Scheduler context.
Definition: scheduler.h:247
static __inline__ bool _Chain_Is_node_off_chain(const Chain_Node *node)
Checks if the node is off chain.
Definition: chainimpl.h:142
static void _Scheduler_SMP_Extract_from_scheduled(Scheduler_Context *context, Scheduler_Node *node)
Extracts a scheduled node from the scheduled nodes.
static void _Scheduler_SMP_Withdraw_node(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Thread_Scheduler_state next_state, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Withdraws the node.
static Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(const Scheduler_Node *node)
Gets the state of the node.
#define SCHEDULER_PRIORITY_APPEND(priority)
Returns the priority control with the append indicator bit set.
static __inline__ void _Chain_Initialize_node(Chain_Node *the_node)
Initializes a chain node.
Definition: chainimpl.h:122
union Scheduler_Node::@18 Node
Chain node for usage in various scheduler data structures.
bool _Scheduler_EDF_SMP_Set_affinity(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node_base, const Processor_mask *affinity)
Checks if the processor set of the scheduler is the subset of the affinity set.
Priority_Control priority
The current priority of thread owning this node.
Definition: schedulersmp.h:114
static void _Scheduler_SMP_Reconsider_help_request(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_ready)
Reconsiders help request.
static bool _Scheduler_SMP_Ask_for_help(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Chain_Node_order order, Scheduler_SMP_Insert insert_ready, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_scheduled_to_ready, Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Asks for help.
void _Scheduler_EDF_SMP_Unblock(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node)
Unblocks the thread.
static bool _Scheduler_SMP_Priority_less_equal(const void *to_insert, const Chain_Node *next)
Checks if to_insert is less or equal than the priority of the chain node.
Red-black tree node.
Definition: rbtree.h:55
RBTree_Node * _RBTree_Minimum(const RBTree_Control *the_rbtree)
Returns the minimum node of the red-black tree.
Definition: rbtreenext.c:36
static Thread_Control * _Scheduler_SMP_Remove_processor(Scheduler_Context *context, Per_CPU_Control *cpu, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Enqueue enqueue)
Removes an idle thread from the processor.
RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Find_last_set(const Processor_mask *a)
Finds the last set of the processor mask.
void _Scheduler_EDF_SMP_Update_priority(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node)
Updates the priority of the node.
static void _Scheduler_SMP_Block(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_scheduled, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Blocks the thread.
void _Scheduler_EDF_SMP_Pin(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node_base, struct Per_CPU_Control *cpu)
Pin thread operation.
static void _Scheduler_SMP_Set_affinity(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, void *arg, Scheduler_SMP_Set_affinity set_affinity, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Enqueue enqueue, Scheduler_SMP_Allocate_processor allocate_processor)
Sets the affinity of the node.
#define RTEMS_CONTAINER_OF(_m, _type, _member_name)
Returns the pointer to the container of a specified member pointer.
Definition: basedefs.h:550
static __inline__ void _Chain_Append_unprotected(Chain_Control *the_chain, Chain_Node *the_node)
Appends a node (unprotected).
Definition: chainimpl.h:680
static void _Scheduler_SMP_Node_initialize(const Scheduler_Control *scheduler, Scheduler_SMP_Node *node, Thread_Control *thread, Priority_Control priority)
Initializes the scheduler smp node.
static __inline__ bool _RBTree_Is_empty(const RBTree_Control *the_rbtree)
Checks if the RBTree is empty.
Definition: rbtree.h:375
This scheduler node is blocked.
Definition: schedulersmp.h:76
Thread_Scheduler_state
The thread state with respect to the scheduler.
Definition: thread.h:214
Per CPU Core Structure.
Definition: percpu.h:347
RTEMS_INLINE_ROUTINE void _Processor_mask_And(Processor_mask *a, const Processor_mask *b, const Processor_mask *c)
Performs a bitwise a = b & c.
static __inline__ Chain_Node * _Chain_Next(const Chain_Node *the_node)
Returns pointer to the next node from this node.
Definition: chainimpl.h:327
void _Scheduler_EDF_SMP_Block(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node)
Blocks the thread.
static Priority_Control _Scheduler_SMP_Node_priority(const Scheduler_Node *node)
Gets the priority of the node.
Processor_mask Processors
Lock to protect this scheduler instance.
Definition: scheduler.h:257
static bool _Scheduler_SMP_Enqueue_scheduled(Scheduler_Context *context, Scheduler_Node *const node, Priority_Control insert_priority, Chain_Node_order order, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Get_highest_ready get_highest_ready, Scheduler_SMP_Insert insert_ready, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_ready_to_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Enqueues a scheduled node according to the specified order function.
static __inline__ void _Chain_Extract_unprotected(Chain_Node *the_node)
Extracts this node (unprotected).
Definition: chainimpl.h:558
Scheduler node specialization for SMP schedulers.
Definition: schedulersmp.h:100
SMP Scheduler Implementation.
static void _Scheduler_SMP_Do_start_idle(Scheduler_Context *context, Thread_Control *idle, Per_CPU_Control *cpu, Scheduler_SMP_Register_idle register_idle)
Starts the idle thread on the given processor.
EDF SMP Scheduler API.
static void _Scheduler_SMP_Yield(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Enqueue enqueue, Scheduler_SMP_Enqueue enqueue_scheduled)
Performs a yield and asks for help if necessary.
void _Scheduler_EDF_SMP_Yield(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node)
Performs the yield of a thread.
int64_t generation
Generation number to ensure FIFO/LIFO order for threads of the same priority across different ready q...
Scheduler_Node Base
Basic scheduler node.
Definition: schedulersmp.h:104
static bool _Scheduler_SMP_Enqueue(Scheduler_Context *context, Scheduler_Node *node, Priority_Control insert_priority, Chain_Node_order order, Scheduler_SMP_Insert insert_ready, Scheduler_SMP_Insert insert_scheduled, Scheduler_SMP_Move move_from_scheduled_to_ready, Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled, Scheduler_SMP_Allocate_processor allocate_processor)
Enqueues a node according to the specified order function.
RBTree_Control Queue
The ready threads of the corresponding affinity.
static void _Scheduler_SMP_Node_update_priority(Scheduler_SMP_Node *node, Priority_Control new_priority)
Updates the priority of the node to the new priority.
void _Scheduler_EDF_SMP_Withdraw_node(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node, Thread_Scheduler_state next_state)
Withdraws node operation.
static Scheduler_SMP_Node * _Scheduler_SMP_Node_downcast(Scheduler_Node *node)
Gets the scheduler smp node.
static __inline__ void _Chain_Initialize_empty(Chain_Control *the_chain)
Initializes this chain as empty.
Definition: chainimpl.h:505
void _Scheduler_EDF_SMP_Add_processor(const Scheduler_Control *scheduler, Thread_Control *idle)
Adds processor.
static void _Scheduler_SMP_Unblock(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Update update, Scheduler_SMP_Enqueue enqueue)
Unblocks the thread.
static __inline__ void _RBTree_Initialize_node(RBTree_Node *the_node)
Initializes a red-black tree node.
Definition: rbtree.h:129
static Scheduler_Node * _Scheduler_SMP_Get_lowest_scheduled(Scheduler_Context *context, Scheduler_Node *filter)
Returns the lowest member of the scheduled nodes.
Processor_mask _SMP_Online_processors
Set of online processors.
Definition: smp.c:36
Scheduler control.
Definition: scheduler.h:264
Scheduler node for per-thread data.
Definition: schedulernode.h:79
static __inline__ Scheduler_Context * _Scheduler_Get_context(const Scheduler_Control *scheduler)
Gets the context of the scheduler.
Definition: schedulerimpl.h:85
void _Scheduler_EDF_SMP_Unpin(const Scheduler_Control *scheduler, Thread_Control *thread, Scheduler_Node *node_base, struct Per_CPU_Control *cpu)
Unpin thread operation.
static void _Scheduler_SMP_Insert_scheduled(Scheduler_Context *context, Scheduler_Node *node_to_insert, Priority_Control priority_to_insert)
Inserts the node with the given priority into the scheduled nodes.
static void _Scheduler_SMP_Allocate_processor_exact(Scheduler_Context *context, Scheduler_Node *scheduled, Scheduler_Node *victim, Per_CPU_Control *victim_cpu)
Allocates the cpu for the scheduled thread.
static __inline__ void _Chain_Set_off_chain(Chain_Node *node)
Sets off chain.
Definition: chainimpl.h:104
void _Scheduler_EDF_SMP_Node_initialize(const Scheduler_Control *scheduler, Scheduler_Node *node, Thread_Control *the_thread, Priority_Control priority)
Initializes the node with the given priority.
void _RBTree_Extract(RBTree_Control *the_rbtree, RBTree_Node *the_node)
Extracts (removes) the node from the red-black tree.
Definition: rbtreeextract.c:35
Chain_Node Node
Chain node for Scheduler_SMP_Context::Affine_queues.
Thread_Control * _Scheduler_EDF_SMP_Remove_processor(const Scheduler_Control *scheduler, Per_CPU_Control *cpu)
Removes an idle thread from the given cpu.
Chain_Node Chain
The node for Thread_Control::Scheduler::Scheduler_nodes.
Definition: schedulernode.h:91
void _Scheduler_EDF_SMP_Reconsider_help_request(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node)
Reconsiders help operation.
static __inline__ const Chain_Node * _Chain_Immutable_tail(const Chain_Control *the_chain)
Returns pointer to immutable chain tail.
Definition: chainimpl.h:243
static void _Scheduler_SMP_Update_priority(Scheduler_Context *context, Thread_Control *thread, Scheduler_Node *node, Scheduler_SMP_Extract extract_from_ready, Scheduler_SMP_Update update, Scheduler_SMP_Enqueue enqueue, Scheduler_SMP_Enqueue enqueue_scheduled, Scheduler_SMP_Ask_for_help ask_for_help)
Updates the priority of the node and the position in the queues it is in.
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG.
Definition: assert.h:100
void _Scheduler_EDF_SMP_Start_idle(const Scheduler_Control *scheduler, Thread_Control *idle, Per_CPU_Control *cpu)
Starts an idle thread.
static void _Scheduler_SMP_Initialize(Scheduler_SMP_Context *self)
Initializes the scheduler smp context.
uint8_t affinity_ready_queue_index
Ready queue index according to thread affinity.
bool _Scheduler_EDF_SMP_Ask_for_help(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node)
Asks for help operation.