RTEMS 6.1-rc7
percpu.h
1/* SPDX-License-Identifier: BSD-2-Clause */
2
12/*
13 * COPYRIGHT (c) 1989-2011.
14 * On-Line Applications Research Corporation (OAR).
15 *
16 * Copyright (C) 2012, 2018 embedded brains GmbH & Co. KG
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
31 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#ifndef _RTEMS_PERCPU_H
41#define _RTEMS_PERCPU_H
42
43#include <rtems/score/cpuimpl.h>
44
45#if defined( ASM )
46 #include <rtems/asm.h>
47#else
48 #include <rtems/score/assert.h>
49 #include <rtems/score/chain.h>
50 #include <rtems/score/isrlock.h>
51 #include <rtems/score/smp.h>
52 #include <rtems/score/timestamp.h>
53 #include <rtems/score/watchdog.h>
54#endif
55
56#ifdef __cplusplus
57extern "C" {
58#endif
59
60#if defined( RTEMS_SMP )
61 #if defined( RTEMS_PROFILING )
62 #define PER_CPU_CONTROL_SIZE_PROFILING 332
63 #else
64 #define PER_CPU_CONTROL_SIZE_PROFILING 0
65 #endif
66
67 #if defined( RTEMS_DEBUG )
68 #define PER_CPU_CONTROL_SIZE_DEBUG 76
69 #else
70 #define PER_CPU_CONTROL_SIZE_DEBUG 0
71 #endif
72
73 #if CPU_SIZEOF_POINTER > 4
74 #define PER_CPU_CONTROL_SIZE_BIG_POINTER 76
75 #else
76 #define PER_CPU_CONTROL_SIZE_BIG_POINTER 0
77 #endif
78
79 #define PER_CPU_CONTROL_SIZE_BASE 180
80 #define PER_CPU_CONTROL_SIZE_APPROX \
81 ( PER_CPU_CONTROL_SIZE_BASE + CPU_PER_CPU_CONTROL_SIZE + \
82 CPU_INTERRUPT_FRAME_SIZE + PER_CPU_CONTROL_SIZE_PROFILING + \
83 PER_CPU_CONTROL_SIZE_DEBUG + PER_CPU_CONTROL_SIZE_BIG_POINTER )
84
85 /*
86 * This ensures that on SMP configurations the individual per-CPU controls
87 * are on different cache lines to prevent false sharing. This define can be
88 * used in assembler code to easily get the per-CPU control for a particular
89 * processor.
90 */
91 #if PER_CPU_CONTROL_SIZE_APPROX > 1024
92 #define PER_CPU_CONTROL_SIZE_LOG2 11
93 #elif PER_CPU_CONTROL_SIZE_APPROX > 512
94 #define PER_CPU_CONTROL_SIZE_LOG2 10
95 #elif PER_CPU_CONTROL_SIZE_APPROX > 256
96 #define PER_CPU_CONTROL_SIZE_LOG2 9
97 #elif PER_CPU_CONTROL_SIZE_APPROX > 128
98 #define PER_CPU_CONTROL_SIZE_LOG2 8
99 #else
100 #define PER_CPU_CONTROL_SIZE_LOG2 7
101 #endif
102
103 #define PER_CPU_CONTROL_SIZE ( 1 << PER_CPU_CONTROL_SIZE_LOG2 )
104#endif
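The power-of-two padding above allows code (including assembler code, as the comment notes) to reach the control block of processor n with a shift and an add instead of a multiplication. A minimal sketch of that address calculation, assuming an SMP build where PER_CPU_CONTROL_SIZE_LOG2 is defined; the helper name is illustrative and not part of the RTEMS API:

#include <stdint.h>
#include <rtems/score/percpu.h>

/* Sketch only: mirrors what port assembly code does with
 * PER_CPU_CONTROL_SIZE_LOG2. */
static inline Per_CPU_Control *example_cpu_by_index( uint32_t cpu_index )
{
  uintptr_t base = (uintptr_t) &_Per_CPU_Information[ 0 ];

  /* Each Per_CPU_Control_envelope is padded to PER_CPU_CONTROL_SIZE bytes,
   * so index * size reduces to a single left shift. */
  return (Per_CPU_Control *)
    ( base + ( (uintptr_t) cpu_index << PER_CPU_CONTROL_SIZE_LOG2 ) );
}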
105
106#if !defined( ASM )
107
108struct Record_Control;
109
110struct _Thread_Control;
111
112struct Scheduler_Context;
113
129#if defined( RTEMS_SMP )
130
167typedef enum {
176 PER_CPU_STATE_INITIAL,
177
192 PER_CPU_STATE_READY_TO_START_MULTITASKING,
193
197 PER_CPU_STATE_UP,
198
202 PER_CPU_STATE_SHUTDOWN
203} Per_CPU_State;
204
205typedef void ( *Per_CPU_Job_handler )( void *arg );
206
213typedef struct {
217 Per_CPU_Job_handler handler;
218
222 void *arg;
223} Per_CPU_Job_context;
224
225/*
226 * Value for the Per_CPU_Job::done member to indicate that a job is done
227 * (handler was called on the target processor). Must not be a valid pointer
228 * value since it overlaps with the Per_CPU_Job::next member.
229 */
230#define PER_CPU_JOB_DONE 1
231
238typedef struct Per_CPU_Job {
239 union {
243 struct Per_CPU_Job *next;
244
251 Atomic_Ulong done;
252 };
253
257 const Per_CPU_Job_context *context;
258} Per_CPU_Job;
259
260#endif /* defined( RTEMS_SMP ) */
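A Per_CPU_Job binds a Per_CPU_Job_context (handler plus argument) to a queue node whose done member becomes PER_CPU_JOB_DONE once the handler has run on the target processor. A hedged sketch of the intended use in an SMP configuration, relying on _Per_CPU_Submit_job() and _Per_CPU_Wait_for_job() declared further down in this header; the handler, flag, and function names are illustrative only:

#include <rtems/score/percpu.h>

static void example_job_handler( void *arg )
{
  /* Runs on the processor the job was submitted to. */
  *(int *) arg = 1;
}

static int example_flag;

static const Per_CPU_Job_context example_job_context = {
  .handler = example_job_handler,
  .arg = &example_flag
};

/* Sketch only: submit a job to another processor and wait for completion. */
static void example_run_on( Per_CPU_Control *cpu )
{
  Per_CPU_Job job;

  job.context = &example_job_context;
  _Per_CPU_Submit_job( cpu, &job );   /* enqueue and notify the processor */
  _Per_CPU_Wait_for_job( cpu, &job ); /* returns once done == PER_CPU_JOB_DONE */
}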
261
266/*
267 * This was added to address the following warning:
268 * warning: struct has no members
269 */
270#pragma GCC diagnostic push
271#pragma GCC diagnostic ignored "-Wpedantic"
272typedef struct {
273#if defined( RTEMS_PROFILING )
279 CPU_Counter_ticks thread_dispatch_disabled_instant;
280
285 CPU_Counter_ticks max_thread_dispatch_disabled_time;
286
294 CPU_Counter_ticks max_interrupt_time;
295
300 CPU_Counter_ticks max_interrupt_delay;
301
308 uint64_t thread_dispatch_disabled_count;
309
319 uint64_t total_thread_dispatch_disabled_time;
320
327 uint64_t interrupt_count;
328
337 uint64_t total_interrupt_time;
338#endif /* defined( RTEMS_PROFILING ) */
339} Per_CPU_Stats;
340#pragma GCC diagnostic pop
341
345typedef enum {
353  PER_CPU_WATCHDOG_TICKS,
354
362  PER_CPU_WATCHDOG_REALTIME,
363
371  PER_CPU_WATCHDOG_MONOTONIC,
372
376  PER_CPU_WATCHDOG_COUNT
377} Per_CPU_Watchdog_index;
378
384typedef struct Per_CPU_Control {
385 #if CPU_PER_CPU_CONTROL_SIZE > 0
389 CPU_Per_CPU_control cpu_per_cpu;
390 #endif
391
395 void *interrupt_stack_low;
396
400 void *interrupt_stack_high;
401
406 uint32_t isr_nest_level;
407
416 uint32_t isr_dispatch_disable;
417
422 volatile uint32_t thread_dispatch_disable_level;
423
437 volatile bool dispatch_necessary;
438
439 /*
440 * Ensure that the executing member is at least 4-byte aligned, see
441 * PER_CPU_OFFSET_EXECUTING. This is necessary on CPU ports with relaxed
442 * alignment restrictions, e.g. type alignment is less than the type size.
443 */
444 bool reserved_for_executing_alignment[ 3 ];
445
457 struct _Thread_Control *executing;
458
473 struct _Thread_Control *heir;
474
475#if defined(RTEMS_SMP)
476 CPU_Interrupt_frame Interrupt_frame;
477#endif
478
489 Timestamp_Control cpu_usage_timestamp;
490
494 struct {
495#if defined(RTEMS_SMP)
499 ISR_lock_Control Lock;
500#endif
501
506 uint64_t ticks;
507
513 Watchdog_Header Header[ PER_CPU_WATCHDOG_COUNT ];
514 } Watchdog;
515
516 #if defined( RTEMS_SMP )
520 ISR_lock_Control Lock;
521
529 ISR_lock_Context Lock_context;
530
536 Chain_Control Threads_in_need_for_help;
537
544 Atomic_Ulong message;
545
546 struct {
553 const struct _Scheduler_Control *control;
554
561 const struct Scheduler_Context *context;
562
567 struct _Thread_Control *idle_if_online_and_unused;
568 } Scheduler;
569
575 struct _Thread_Control *ancestor;
576
582 char *data;
583
592 Atomic_Uint state;
593
599 struct {
604 ISR_lock_Control Lock;
605
612 struct Per_CPU_Job *head;
613
622 struct Per_CPU_Job **tail;
623 } Jobs;
624
629 bool online;
630
635 bool boot;
636 #endif
637
638 struct Record_Control *record;
639
640 Per_CPU_Stats Stats;
641} Per_CPU_Control;
642
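The per-processor watchdog headers are indexed with Per_CPU_Watchdog_index. A short sketch of how timer code might select the header for one of the clocks; the helper name is illustrative, and Watchdog_Header comes from <rtems/score/watchdog.h>:

#include <rtems/score/percpu.h>
#include <rtems/score/watchdog.h>

/* Sketch only: select the monotonic watchdog header of a processor. */
static Watchdog_Header *example_monotonic_header( Per_CPU_Control *cpu )
{
  return &cpu->Watchdog.Header[ PER_CPU_WATCHDOG_MONOTONIC ];
}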
643#if defined( RTEMS_SMP )
644typedef struct {
645 Per_CPU_Control per_cpu;
646 char unused_space_for_cache_line_alignment
647 [ PER_CPU_CONTROL_SIZE - sizeof( Per_CPU_Control ) ];
648} Per_CPU_Control_envelope;
649#else
650typedef struct {
651 Per_CPU_Control per_cpu;
652} Per_CPU_Control_envelope;
653#endif
654
660extern CPU_STRUCTURE_ALIGNMENT Per_CPU_Control_envelope _Per_CPU_Information[];
661
662#define _Per_CPU_Acquire( cpu, lock_context ) \
663 _ISR_lock_Acquire( &( cpu )->Lock, lock_context )
664
665#define _Per_CPU_Release( cpu, lock_context ) \
666 _ISR_lock_Release( &( cpu )->Lock, lock_context )
667
668/*
669 * If we get the current processor index in a context which allows thread
670 * dispatching, then we may already run on another processor right after the
671 * read instruction. There are very few cases in which this makes sense (here
672 * we can use _Per_CPU_Get_snapshot()). All other places must use
673 * _Per_CPU_Get() so that we can add checks for RTEMS_DEBUG.
674 */
675#if defined( _CPU_Get_current_per_CPU_control )
676 #define _Per_CPU_Get_snapshot() _CPU_Get_current_per_CPU_control()
677#else
678 #define _Per_CPU_Get_snapshot() \
679 ( &_Per_CPU_Information[ _SMP_Get_current_processor() ].per_cpu )
680#endif
681
682#if defined( RTEMS_SMP )
683static inline Per_CPU_Control *_Per_CPU_Get( void )
684{
685 Per_CPU_Control *cpu_self = _Per_CPU_Get_snapshot();
686
687 _Assert(
688 cpu_self->thread_dispatch_disable_level != 0 || _ISR_Get_level() != 0
689 );
690
691 return cpu_self;
692}
693#else
694#define _Per_CPU_Get() _Per_CPU_Get_snapshot()
695#endif
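As the comment above notes, _Per_CPU_Get() is only meaningful while thread dispatching or interrupts are disabled, which the RTEMS_DEBUG assertion enforces. A hedged sketch of a valid call site, using the local ISR disable macros this header already pulls in; the function name is illustrative:

#include <rtems/score/percpu.h>

/* Sketch only: pin execution to the current processor before using it. */
static void example_touch_current_cpu( void )
{
  ISR_Level        level;
  Per_CPU_Control *cpu_self;

  _ISR_Local_disable( level );   /* no thread dispatch, no migration */
  cpu_self = _Per_CPU_Get();
  (void) cpu_self;               /* ... read or update members here ... */
  _ISR_Local_enable( level );
}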
696
697static inline Per_CPU_Control *_Per_CPU_Get_by_index( uint32_t index )
698{
699 return &_Per_CPU_Information[ index ].per_cpu;
700}
701
702static inline uint32_t _Per_CPU_Get_index( const Per_CPU_Control *cpu )
703{
704#if defined(RTEMS_SMP)
705 const Per_CPU_Control_envelope *per_cpu_envelope =
706 ( const Per_CPU_Control_envelope * ) cpu;
707
708 return ( uint32_t ) ( per_cpu_envelope - &_Per_CPU_Information[ 0 ] );
709#else
710 (void) cpu;
711 return 0;
712#endif
713}
714
715static inline struct _Thread_Control *_Per_CPU_Get_executing(
716 const Per_CPU_Control *cpu
717)
718{
719 return cpu->executing;
720}
721
722static inline bool _Per_CPU_Is_ISR_in_progress( const Per_CPU_Control *cpu )
723{
724#if CPU_PROVIDES_ISR_IS_IN_PROGRESS == TRUE
725 (void) cpu;
726 return _ISR_Is_in_progress();
727#else
728 return cpu->isr_nest_level != 0;
729#endif
730}
731
732static inline bool _Per_CPU_Is_processor_online(
733 const Per_CPU_Control *cpu
734)
735{
736#if defined( RTEMS_SMP )
737 return cpu->online;
738#else
739 (void) cpu;
740
741 return true;
742#endif
743}
744
745static inline bool _Per_CPU_Is_boot_processor(
746 const Per_CPU_Control *cpu
747)
748{
749#if defined( RTEMS_SMP )
750 return cpu->boot;
751#else
752 (void) cpu;
753
754 return true;
755#endif
756}
757
758static inline void _Per_CPU_Acquire_all(
759 ISR_lock_Context *lock_context
760)
761{
762#if defined(RTEMS_SMP)
763 uint32_t cpu_max;
764 uint32_t cpu_index;
765 Per_CPU_Control *previous_cpu;
766
767 cpu_max = _SMP_Get_processor_maximum();
768 previous_cpu = _Per_CPU_Get_by_index( 0 );
769
770 _ISR_lock_ISR_disable( lock_context );
771 _Per_CPU_Acquire( previous_cpu, lock_context );
772
773 for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
774 Per_CPU_Control *cpu;
775
776 cpu = _Per_CPU_Get_by_index( cpu_index );
777 _Per_CPU_Acquire( cpu, &previous_cpu->Lock_context );
778 previous_cpu = cpu;
779 }
780#else
781 _ISR_lock_ISR_disable( lock_context );
782#endif
783}
784
785static inline void _Per_CPU_Release_all(
786 ISR_lock_Context *lock_context
787)
788{
789#if defined(RTEMS_SMP)
790 uint32_t cpu_max;
791 uint32_t cpu_index;
792 Per_CPU_Control *cpu;
793
794 cpu_max = _SMP_Get_processor_maximum();
795 cpu = _Per_CPU_Get_by_index( cpu_max - 1 );
796
797 for ( cpu_index = cpu_max - 1 ; cpu_index > 0 ; --cpu_index ) {
798 Per_CPU_Control *previous_cpu;
799
800 previous_cpu = _Per_CPU_Get_by_index( cpu_index - 1 );
801 _Per_CPU_Release( cpu, &previous_cpu->Lock_context );
802 cpu = previous_cpu;
803 }
804
805 _Per_CPU_Release( cpu, lock_context );
806 _ISR_lock_ISR_enable( lock_context );
807#else
808 _ISR_lock_ISR_enable( lock_context );
809#endif
810}
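_Per_CPU_Acquire_all() takes every per-CPU lock in ascending index order (a fixed global order that avoids deadlock) and _Per_CPU_Release_all() drops them in reverse. A sketch of the intended pairing; the visitor callback and function name are illustrative:

#include <rtems/score/percpu.h>
#include <rtems/score/smp.h>

/* Sketch only: inspect all processors while holding the per-CPU locks. */
static void example_visit_all_cpus( void ( *visit )( Per_CPU_Control * ) )
{
  ISR_lock_Context lock_context;
  uint32_t         cpu_index;
  uint32_t         cpu_max;

  _Per_CPU_Acquire_all( &lock_context );

  cpu_max = _SMP_Get_processor_maximum();

  for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
    ( *visit )( _Per_CPU_Get_by_index( cpu_index ) );
  }

  _Per_CPU_Release_all( &lock_context );
}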
811
812#if defined( RTEMS_SMP )
813
821static inline Per_CPU_State _Per_CPU_Get_state( const Per_CPU_Control *cpu )
822{
823 return (Per_CPU_State)
824 _Atomic_Load_uint( &cpu->state, ATOMIC_ORDER_ACQUIRE );
825}
826
835static inline void _Per_CPU_Set_state(
836 Per_CPU_Control *cpu_self,
837 Per_CPU_State state
838)
839{
840 _Assert( cpu_self == _Per_CPU_Get() );
841 _Atomic_Store_uint(
842 &cpu_self->state,
843 (unsigned int) state,
844 ATOMIC_ORDER_RELEASE
845 );
846}
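The acquire load in _Per_CPU_Get_state() pairs with the release store in _Per_CPU_Set_state(), so data published before a state change is visible to any observer of the new state. A hedged sketch of a polling loop on another processor; real startup code would also use a processor event primitive rather than a pure busy wait:

#include <rtems/score/percpu.h>

/* Sketch only: wait until a processor reports PER_CPU_STATE_UP. */
static void example_wait_until_up( const Per_CPU_Control *cpu )
{
  while ( _Per_CPU_Get_state( cpu ) != PER_CPU_STATE_UP ) {
    /* Busy wait; the acquire load orders the reads that follow. */
  }
}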
847
853void _Per_CPU_Perform_jobs( Per_CPU_Control *cpu );
854
865void _Per_CPU_Add_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
866
878void _Per_CPU_Submit_job( Per_CPU_Control *cpu, Per_CPU_Job *job );
879
889void _Per_CPU_Wait_for_job(
890 const Per_CPU_Control *cpu,
891 const Per_CPU_Job *job
892);
893
894#endif /* defined( RTEMS_SMP ) */
895
896/*
897 * On a non-SMP system, _SMP_Get_current_processor() is defined to 0. Thus,
898 * when built for non-SMP, there should be no performance penalty.
899 */
900#define _Thread_Dispatch_disable_level \
901 _Per_CPU_Get()->thread_dispatch_disable_level
902#define _Thread_Heir \
903 _Per_CPU_Get()->heir
904
905#if defined(_CPU_Get_thread_executing)
906#define _Thread_Executing \
907 _CPU_Get_thread_executing()
908#else
909#define _Thread_Executing \
910 _Per_CPU_Get_executing( _Per_CPU_Get() )
911#endif
912
913#define _ISR_Nest_level \
914 _Per_CPU_Get()->isr_nest_level
915#define _CPU_Interrupt_stack_low \
916 _Per_CPU_Get()->interrupt_stack_low
917#define _CPU_Interrupt_stack_high \
918 _Per_CPU_Get()->interrupt_stack_high
919#define _Thread_Dispatch_necessary \
920 _Per_CPU_Get()->dispatch_necessary
921
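A hedged sketch of how these aliases read in kernel code, with interrupts disabled around the accesses so the executing processor cannot change between them; the function name is illustrative:

#include <stdbool.h>
#include <rtems/score/percpu.h>

/* Sketch only: the aliases expand to fields of the current processor. */
static bool example_dispatch_is_pending( void )
{
  ISR_Level level;
  bool      pending;

  _ISR_Local_disable( level );
  pending = _Thread_Dispatch_necessary
    && _Thread_Dispatch_disable_level == 0
    && _ISR_Nest_level == 0;
  _ISR_Local_enable( level );

  return pending;
}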
932static inline struct _Thread_Control *_Thread_Get_executing( void )
933{
934 struct _Thread_Control *executing;
935
936 #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
937 ISR_Level level;
938
939 _ISR_Local_disable( level );
940 #endif
941
942 executing = _Thread_Executing;
943
944 #if defined(RTEMS_SMP) && !defined(_CPU_Get_thread_executing)
945 _ISR_Local_enable( level );
946 #endif
947
948 return executing;
949}
950
953#endif /* !defined( ASM ) */
954
955#if defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS )
956
957#define PER_CPU_INTERRUPT_STACK_LOW \
958 CPU_PER_CPU_CONTROL_SIZE
959#define PER_CPU_INTERRUPT_STACK_HIGH \
960 PER_CPU_INTERRUPT_STACK_LOW + CPU_SIZEOF_POINTER
961
962#define INTERRUPT_STACK_LOW \
963 (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_LOW)
964#define INTERRUPT_STACK_HIGH \
965 (SYM(_Per_CPU_Information) + PER_CPU_INTERRUPT_STACK_HIGH)
966
967/*
968 * These are the offsets of the required elements in the per CPU table.
969 */
970#define PER_CPU_ISR_NEST_LEVEL \
971 PER_CPU_INTERRUPT_STACK_HIGH + CPU_SIZEOF_POINTER
972#define PER_CPU_ISR_DISPATCH_DISABLE \
973 PER_CPU_ISR_NEST_LEVEL + 4
974#define PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL \
975 PER_CPU_ISR_DISPATCH_DISABLE + 4
976#define PER_CPU_DISPATCH_NEEDED \
977 PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL + 4
978#define PER_CPU_OFFSET_EXECUTING \
979 PER_CPU_DISPATCH_NEEDED + 4
980#define PER_CPU_OFFSET_HEIR \
981 PER_CPU_OFFSET_EXECUTING + CPU_SIZEOF_POINTER
982#if defined(RTEMS_SMP)
983#define PER_CPU_INTERRUPT_FRAME_AREA \
984 PER_CPU_OFFSET_HEIR + CPU_SIZEOF_POINTER
985#endif
986
987#define THREAD_DISPATCH_DISABLE_LEVEL \
988 (SYM(_Per_CPU_Information) + PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL)
989#define ISR_NEST_LEVEL \
990 (SYM(_Per_CPU_Information) + PER_CPU_ISR_NEST_LEVEL)
991#define DISPATCH_NEEDED \
992 (SYM(_Per_CPU_Information) + PER_CPU_DISPATCH_NEEDED)
993
994#endif /* defined( ASM ) || defined( _RTEMS_PERCPU_DEFINE_OFFSETS ) */
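These offset macros must track the C layout of Per_CPU_Control, including the 4-byte alignment of the executing member noted earlier. A hedged sketch of a compile-time check a port or test could add; RTEMS itself uses its own static assertion macros; plain C11 _Static_assert is used here for illustration:

#define _RTEMS_PERCPU_DEFINE_OFFSETS
#include <stddef.h>
#include <rtems/score/percpu.h>

/* Sketch only: verify the assembly offsets against the C structure layout. */
_Static_assert(
  offsetof( Per_CPU_Control, isr_nest_level ) == ( PER_CPU_ISR_NEST_LEVEL ),
  "PER_CPU_ISR_NEST_LEVEL must match the C layout"
);
_Static_assert(
  offsetof( Per_CPU_Control, dispatch_necessary ) == ( PER_CPU_DISPATCH_NEEDED ),
  "PER_CPU_DISPATCH_NEEDED must match the C layout"
);
_Static_assert(
  offsetof( Per_CPU_Control, executing ) == ( PER_CPU_OFFSET_EXECUTING ),
  "PER_CPU_OFFSET_EXECUTING must match the C layout"
);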
995
996#ifdef __cplusplus
997}
998#endif
999
1000#endif
1001/* end of include file */
This header file provides the interfaces of the Assert Handler.
This header file provides interfaces of the SMP Support which are used by the implementation and the ...
This header file provides interfaces of the Watchdog Handler which are used by the implementation and...
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG and static analysis runs.
Definition: assert.h:96
#define _ISR_lock_ISR_enable(_context)
Restores the saved interrupt state of the ISR lock context.
Definition: isrlock.h:385
#define _ISR_lock_ISR_disable(_context)
Disables interrupts and saves the previous interrupt state in the ISR lock context.
Definition: isrlock.h:364
#define _ISR_Local_disable(_level)
Disables interrupts on this processor.
Definition: isrlevel.h:76
bool _ISR_Is_in_progress(void)
Checks if an ISR is in progress.
Definition: nios2-isr-is-in-progress.c:36
#define _ISR_Local_enable(_level)
Enables interrupts on this processor.
Definition: isrlevel.h:93
#define _ISR_Get_level()
Return current interrupt level.
Definition: isrlevel.h:147
uint32_t ISR_Level
Definition: isrlevel.h:60
Per_CPU_Watchdog_index
Per-CPU watchdog header index.
Definition: percpu.h:345
CPU_STRUCTURE_ALIGNMENT Per_CPU_Control_envelope _Per_CPU_Information[]
Set of Per CPU Core Information.
Definition: asm.h:171
@ PER_CPU_WATCHDOG_MONOTONIC
Index for monotonic clock per-CPU watchdog header.
Definition: percpu.h:371
@ PER_CPU_WATCHDOG_COUNT
Count of per-CPU watchdog headers.
Definition: percpu.h:376
@ PER_CPU_WATCHDOG_REALTIME
Index for realtime clock per-CPU watchdog header.
Definition: percpu.h:362
@ PER_CPU_WATCHDOG_TICKS
Index for tick clock per-CPU watchdog header.
Definition: percpu.h:353
int64_t Timestamp_Control
Definition: timestamp.h:76
This header file provides the interfaces of the ISR Locks.
This header file provides interfaces of the Chain Handler which are used by the implementation and th...
Interrupt stack frame (ISF).
Definition: cpuimpl.h:64
The CPU specific per-CPU control.
Definition: cpuimpl.h:91
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:94
Definition: percpu.h:650
Per CPU Core Structure.
Definition: percpu.h:384
struct _Thread_Control * heir
This is the heir thread for this processor.
Definition: percpu.h:473
uint32_t isr_dispatch_disable
Indicates if an ISR thread dispatch is disabled.
Definition: percpu.h:416
void * interrupt_stack_high
The interrupt stack high address for this processor.
Definition: percpu.h:400
volatile uint32_t thread_dispatch_disable_level
The thread dispatch critical section nesting counter which is used to prevent context switches at ino...
Definition: percpu.h:422
volatile bool dispatch_necessary
This is set to true when this processor needs to run the thread dispatcher.
Definition: percpu.h:437
Timestamp_Control cpu_usage_timestamp
The CPU usage timestamp contains the time point of the last heir thread change or last CPU usage upda...
Definition: percpu.h:489
struct _Thread_Control * executing
This is the thread executing on this processor.
Definition: percpu.h:457
uint64_t ticks
Watchdog ticks on this processor used for monotonic clock watchdogs.
Definition: percpu.h:506
struct Per_CPU_Control::@4403 Watchdog
Watchdog state for this processor.
uint32_t isr_nest_level
Definition: percpu.h:406
void * interrupt_stack_low
The interrupt stack low address for this processor.
Definition: percpu.h:395
Per-CPU statistics.
Definition: percpu.h:272
Definition: record.h:44
Scheduler context.
Definition: scheduler.h:318
The watchdog header to manage scheduled watchdogs.
Definition: watchdog.h:90
Scheduler control.
Definition: scheduler.h:337
Definition: thread.h:837
This header file provides interfaces of the Timestamp Handler which are used by the implementation an...
This union represents a chain control block.
Definition: chain.h:96