threadqenqueue.c
/*
 * COPYRIGHT (c) 1989-2014.
 * On-Line Applications Research Corporation (OAR).
 *
 * Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/threadqimpl.h>
#include <rtems/score/assert.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/status.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)

#define THREAD_QUEUE_BLOCKED \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)

#define THREAD_QUEUE_READY_AGAIN \
  (THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN)

#if defined(RTEMS_SMP)
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.
 * In this case, the application is already in trouble.
 */

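/*
 * Illustrative example (hypothetical scenario, added for clarity): assume
 * thread A holds mutex M1 and blocks on mutex M2, which is owned by thread B.
 * If B now tries to obtain M1, the path acquire first registers a link
 * M1 -> M2 and then attempts to add a link M2 -> M1.  Walking the registry
 * from the target of the new link (M1) leads back to its source (M2), so the
 * cycle and thus the deadlock is detected and the enqueue is refused.
 */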
typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}

static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  link->source = source;
  link->target = target;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif

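/*
 * On SMP configurations, this releases the thread wait locks and registry
 * links obtained by _Thread_queue_Path_acquire_critical(), walking the path
 * backwards from the last acquired link to the first.  On non-SMP
 * configurations, there is no path state to release.
 */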
#if !defined(RTEMS_SMP)
static
#endif
void _Thread_queue_Path_release_critical(
  Thread_queue_Context *queue_context
)
{
#if defined(RTEMS_SMP)
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request_locked( link->owner, &link->Lock_context );
    } else {
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined(RTEMS_DEBUG)
    _Chain_Set_off_chain( &link->Lock_context.Wait.Gate.Node );
#endif
  }
#else
  (void) queue_context;
#endif
}

#if defined(RTEMS_SMP)
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for the
   * first thread on the path that tries to enqueue on a thread queue.  This
   * thread can be identified by the thread wait operations.  This lock acquire
   * is necessary for the timeout and explicit thread priority changes, see
   * _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected(
      &queue_context->Path.Links,
      &link->Path_node
    );
    link->owner = deadlock;
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
#endif

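/*
 * Acquires the thread queue path starting at the thread queue to enqueue on.
 * On SMP configurations, the thread wait locks of the owners along the path
 * are acquired and the corresponding links are registered for deadlock
 * detection.  Returns false if enqueueing the thread would create a deadlock,
 * and true otherwise.
 */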
#if !defined(RTEMS_SMP)
static
#endif
bool _Thread_queue_Path_acquire_critical(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control     *owner;
#if defined(RTEMS_SMP)
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the task
   * a bit more difficult.  We have to avoid deadlocks at SMP lock level, since
   * this would result in an unrecoverable deadlock of the overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  do {
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        if ( link->Lock_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Lock_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}

void _Thread_queue_Enqueue_do_nothing_extra(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Per_CPU_Control      *cpu_self,
  Thread_queue_Context *queue_context
)
{
  /* Do nothing */
}

void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}

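/*
 * Overview of the enqueue sequence: the thread claims the wait queue, the
 * thread queue path is acquired to detect deadlocks, the thread is enqueued
 * according to the queueing discipline of the operations, the wait flags are
 * set to THREAD_QUEUE_INTEND_TO_BLOCK, the queue lock is released, the
 * enqueue callout (for example a timeout setup) is invoked, and finally the
 * thread blocks itself unless the request was already satisfied in the
 * meantime.
 */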
void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );

#if defined(RTEMS_MULTIPROCESSING)
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * At this point thread dispatching is disabled, however, we already released
   * the thread queue lock.  Thus, interrupts or threads on other processors
   * may have already changed our state with respect to the thread queue
   * object.  The request could be satisfied or timed out.  This situation is
   * indicated by the thread wait flags.  Other parties must not modify our
   * thread state as long as we are in the THREAD_QUEUE_INTEND_TO_BLOCK thread
   * wait state, thus we have to cancel the blocking operation ourselves if
   * necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );
  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}
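/*
 * Illustrative caller sketch (hypothetical, not taken from the RTEMS
 * sources): a blocking operation fills in the thread queue context and then
 * calls _Thread_queue_Enqueue(), for example along these lines:
 *
 *   queue_context.thread_state = STATES_WAITING_FOR_MUTEX;
 *   queue_context.enqueue_callout = _Thread_queue_Enqueue_do_nothing_extra;
 *   queue_context.deadlock_callout = _Thread_queue_Deadlock_status;
 *   _Thread_queue_Enqueue(
 *     &the_mutex->Wait_queue.Queue,
 *     operations,
 *     executing,
 *     &queue_context
 *   );
 *   status = _Thread_Wait_get_status( executing );
 *
 * Real callers usually use the Thread_queue_Context helper routines instead
 * of assigning the members directly, and set an enqueue callout which arms a
 * timeout watchdog if the operation has a timeout.
 */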

#if defined(RTEMS_SMP)
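/*
 * The sticky variant does not block the thread.  The enqueued thread busy
 * waits below until it is made ready again or its timeout expires.  It is
 * intended for locking protocols whose waiting threads must remain
 * schedulable, such as the MrsP semaphores.
 */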
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Assert( queue_context->enqueue_callout != NULL );

  _Thread_Wait_claim( the_thread, queue );

  if ( !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context ) ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release( queue, &queue_context->Lock_context.Lock_context );

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  while (
    _Thread_Wait_flags_get_acquire( the_thread ) == THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
#endif

#if defined(RTEMS_MULTIPROCESSING)
static bool _Thread_queue_MP_set_callout(
  Thread_Control             *the_thread,
  const Thread_queue_Context *queue_context
)
{
  Thread_Proxy_control    *the_proxy;
  Thread_queue_MP_callout  mp_callout;

  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    return false;
  }

  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = queue_context->mp_callout;
  _Assert( mp_callout != NULL );
  the_proxy->thread_queue_callout = mp_callout;
  return true;
}
#endif

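/*
 * Returns true if the thread has already blocked and must be unblocked by the
 * caller, and false if the thread is still in the intend-to-block wait state
 * and will cancel the blocking operation itself.
 */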
static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
  bool success;
  bool unblock;

  /*
   * We must update the wait flags under protection of the current thread lock,
   * otherwise a _Thread_Timeout() running on another processor may interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );
  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}

bool _Thread_queue_Extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
#if defined(RTEMS_MULTIPROCESSING)
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
  ( *operations->extract )( queue, the_thread, queue_context );
  return _Thread_queue_Make_ready_again( the_thread );
}

void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
}

void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context.Lock_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

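/*
 * Hands the thread queue over from the previous owner to the first enqueued
 * thread, makes the new owner ready again, and unblocks it if it had already
 * blocked.
 */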
void _Thread_queue_Surrender(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;

#if defined(RTEMS_MULTIPROCESSING)
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );

  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }

  _Thread_Dispatch_enable( cpu_self );
}

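/*
 * Sticky variant of the surrender operation.  The new owner busy waits in
 * _Thread_queue_Enqueue_sticky(), so no unblock is necessary; instead the
 * sticky levels of the previous and new owner are updated.
 */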
#if defined(RTEMS_SMP)
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner = ( *operations->surrender )(
    queue,
    heads,
    previous_owner,
    queue_context
  );
  queue->owner = new_owner;
  _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Priority_and_sticky_update( previous_owner, -1 );
  _Thread_Priority_and_sticky_update( new_owner, 0 );
  _Thread_Dispatch_enable( cpu_self );
}
#endif

Thread_Control *_Thread_queue_Do_dequeue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations
#if defined(RTEMS_MULTIPROCESSING)
  ,
  Thread_queue_MP_callout        mp_callout
#endif
)
{
  Thread_queue_Context  queue_context;
  Thread_Control       *the_thread;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_MP_callout( &queue_context, mp_callout );
  _Thread_queue_Acquire( the_thread_queue, &queue_context );

  the_thread = _Thread_queue_First_locked( the_thread_queue, operations );

  if ( the_thread != NULL ) {
    _Thread_queue_Extract_critical(
      &the_thread_queue->Queue,
      operations,
      the_thread,
      &queue_context
    );
  } else {
    _Thread_queue_Release( the_thread_queue, &queue_context );
  }

  return the_thread;
}

#if defined(RTEMS_MULTIPROCESSING)
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif