RTEMS 6.1-rc1
fsl_common_arm.h
1/*
2 * Copyright (c) 2015-2016, Freescale Semiconductor, Inc.
3 * Copyright 2016-2022 NXP
4 * All rights reserved.
5 *
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8
9#ifndef _FSL_COMMON_ARM_H_
10#define _FSL_COMMON_ARM_H_
11
12/*
13 * For CMSIS pack RTE.
14 * CMSIS pack RTE generates "RTE_Components.h" which contains the statements
15 * of the related <RTE_Components_h> element for all selected software components.
16 */
17#ifdef _RTE_
18#include "RTE_Components.h"
19#endif
20
62/* clang-format off */
63#if ((defined(__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
64 (defined(__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
65 (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
66 (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ == 1)))
67/* clang-format on */
68
69/* If the LDREX and STREX are supported, use them. */
70#define _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, val, ops) \
71 do \
72 { \
73 (val) = __LDREXB(addr); \
74 (ops); \
75 } while (0UL != __STREXB((val), (addr)))
76
77#define _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, val, ops) \
78 do \
79 { \
80 (val) = __LDREXH(addr); \
81 (ops); \
82 } while (0UL != __STREXH((val), (addr)))
83
84#define _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, val, ops) \
85 do \
86 { \
87 (val) = __LDREXW(addr); \
88 (ops); \
89 } while (0UL != __STREXW((val), (addr)))
90
91static inline void _SDK_AtomicLocalAdd1Byte(volatile uint8_t *addr, uint8_t val)
92{
93 uint8_t s_val;
94
95 _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val += val);
96}
97
98static inline void _SDK_AtomicLocalAdd2Byte(volatile uint16_t *addr, uint16_t val)
99{
100 uint16_t s_val;
101
102 _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val += val);
103}
104
105static inline void _SDK_AtomicLocalAdd4Byte(volatile uint32_t *addr, uint32_t val)
106{
107 uint32_t s_val;
108
109 _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val += val);
110}
111
112static inline void _SDK_AtomicLocalSub1Byte(volatile uint8_t *addr, uint8_t val)
113{
114 uint8_t s_val;
115
116 _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val -= val);
117}
118
119static inline void _SDK_AtomicLocalSub2Byte(volatile uint16_t *addr, uint16_t val)
120{
121 uint16_t s_val;
122
123 _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val -= val);
124}
125
126static inline void _SDK_AtomicLocalSub4Byte(volatile uint32_t *addr, uint32_t val)
127{
128 uint32_t s_val;
129
130 _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val -= val);
131}
132
133static inline void _SDK_AtomicLocalSet1Byte(volatile uint8_t *addr, uint8_t bits)
134{
135 uint8_t s_val;
136
137 _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val |= bits);
138}
139
140static inline void _SDK_AtomicLocalSet2Byte(volatile uint16_t *addr, uint16_t bits)
141{
142 uint16_t s_val;
143
144 _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val |= bits);
145}
146
147static inline void _SDK_AtomicLocalSet4Byte(volatile uint32_t *addr, uint32_t bits)
148{
149 uint32_t s_val;
150
151 _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val |= bits);
152}
153
154static inline void _SDK_AtomicLocalClear1Byte(volatile uint8_t *addr, uint8_t bits)
155{
156 uint8_t s_val;
157
158 _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val &= ~bits);
159}
160
161static inline void _SDK_AtomicLocalClear2Byte(volatile uint16_t *addr, uint16_t bits)
162{
163 uint16_t s_val;
164
165 _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val &= ~bits);
166}
167
168static inline void _SDK_AtomicLocalClear4Byte(volatile uint32_t *addr, uint32_t bits)
169{
170 uint32_t s_val;
171
172 _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val &= ~bits);
173}
174
175static inline void _SDK_AtomicLocalToggle1Byte(volatile uint8_t *addr, uint8_t bits)
176{
177 uint8_t s_val;
178
179 _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val ^= bits);
180}
181
182static inline void _SDK_AtomicLocalToggle2Byte(volatile uint16_t *addr, uint16_t bits)
183{
184 uint16_t s_val;
185
186 _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val ^= bits);
187}
188
189static inline void _SDK_AtomicLocalToggle4Byte(volatile uint32_t *addr, uint32_t bits)
190{
191 uint32_t s_val;
192
193 _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val ^= bits);
194}
195
196static inline void _SDK_AtomicLocalClearAndSet1Byte(volatile uint8_t *addr, uint8_t clearBits, uint8_t setBits)
197{
198 uint8_t s_val;
199
200 _SDK_ATOMIC_LOCAL_OPS_1BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
201}
202
203static inline void _SDK_AtomicLocalClearAndSet2Byte(volatile uint16_t *addr, uint16_t clearBits, uint16_t setBits)
204{
205 uint16_t s_val;
206
207 _SDK_ATOMIC_LOCAL_OPS_2BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
208}
209
210static inline void _SDK_AtomicLocalClearAndSet4Byte(volatile uint32_t *addr, uint32_t clearBits, uint32_t setBits)
211{
212 uint32_t s_val;
213
214 _SDK_ATOMIC_LOCAL_OPS_4BYTE(addr, s_val, s_val = (s_val & ~clearBits) | setBits);
215}
216
217#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
218 ((1UL == sizeof(*(addr))) ? \
219 _SDK_AtomicLocalAdd1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(val)) : \
220 ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalAdd2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(val)) : \
221 _SDK_AtomicLocalAdd4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(val))))
222
223#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
224 ((1UL == sizeof(*(addr))) ? \
225 _SDK_AtomicLocalSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
226 ((2UL == sizeof(*(addr))) ? _SDK_AtomicLocalSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
227 _SDK_AtomicLocalSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))
228
229#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
230 ((1UL == sizeof(*(addr))) ? \
231 _SDK_AtomicLocalClear1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
232 ((2UL == sizeof(*(addr))) ? \
233 _SDK_AtomicLocalClear2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
234 _SDK_AtomicLocalClear4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))
235
236#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
237 ((1UL == sizeof(*(addr))) ? \
238 _SDK_AtomicLocalToggle1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(bits)) : \
239 ((2UL == sizeof(*(addr))) ? \
240 _SDK_AtomicLocalToggle2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(bits)) : \
241 _SDK_AtomicLocalToggle4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(bits))))
242
243#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
244 ((1UL == sizeof(*(addr))) ? \
245 _SDK_AtomicLocalClearAndSet1Byte((volatile uint8_t *)(volatile void *)(addr), (uint8_t)(clearBits), (uint8_t)(setBits)) : \
246 ((2UL == sizeof(*(addr))) ? \
247 _SDK_AtomicLocalClearAndSet2Byte((volatile uint16_t *)(volatile void *)(addr), (uint16_t)(clearBits), (uint16_t)(setBits)) : \
248 _SDK_AtomicLocalClearAndSet4Byte((volatile uint32_t *)(volatile void *)(addr), (uint32_t)(clearBits), (uint32_t)(setBits))))
249#else
250
251#define SDK_ATOMIC_LOCAL_ADD(addr, val) \
252 do \
253 { \
254 uint32_t s_atomicOldInt; \
255 s_atomicOldInt = DisableGlobalIRQ(); \
256 *(addr) += (val); \
257 EnableGlobalIRQ(s_atomicOldInt); \
258 } while (0)
259
260#define SDK_ATOMIC_LOCAL_SET(addr, bits) \
261 do \
262 { \
263 uint32_t s_atomicOldInt; \
264 s_atomicOldInt = DisableGlobalIRQ(); \
265 *(addr) |= (bits); \
266 EnableGlobalIRQ(s_atomicOldInt); \
267 } while (0)
268
269#define SDK_ATOMIC_LOCAL_CLEAR(addr, bits) \
270 do \
271 { \
272 uint32_t s_atomicOldInt; \
273 s_atomicOldInt = DisableGlobalIRQ(); \
274 *(addr) &= ~(bits); \
275 EnableGlobalIRQ(s_atomicOldInt); \
276 } while (0)
277
278#define SDK_ATOMIC_LOCAL_TOGGLE(addr, bits) \
279 do \
280 { \
281 uint32_t s_atomicOldInt; \
282 s_atomicOldInt = DisableGlobalIRQ(); \
283 *(addr) ^= (bits); \
284 EnableGlobalIRQ(s_atomicOldInt); \
285 } while (0)
286
287#define SDK_ATOMIC_LOCAL_CLEAR_AND_SET(addr, clearBits, setBits) \
288 do \
289 { \
290 uint32_t s_atomicOldInt; \
291 s_atomicOldInt = DisableGlobalIRQ(); \
292 *(addr) = (*(addr) & ~(clearBits)) | (setBits); \
293 EnableGlobalIRQ(s_atomicOldInt); \
294 } while (0)
295
296#endif
297/* @} */
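/*
 * Illustrative usage sketch for application code (the helper name and bit
 * position are hypothetical): the SDK_ATOMIC_LOCAL_* macros dispatch on
 * sizeof(*(addr)), so they can be applied directly to uint8_t, uint16_t or
 * uint32_t objects shared with an ISR on the same core.
 */
static inline void Example_PublishAndConsumeEvent(volatile uint32_t *flags)
{
    SDK_ATOMIC_LOCAL_SET(flags, 1UL << 3);   /* atomically set event bit 3 */
    SDK_ATOMIC_LOCAL_CLEAR(flags, 1UL << 3); /* atomically clear it again */
}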
298
300/* @{ */
302#define USEC_TO_COUNT(us, clockFreqInHz) (uint64_t)(((uint64_t)(us) * (clockFreqInHz)) / 1000000U)
304#define COUNT_TO_USEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000000U / (clockFreqInHz))
305
307#define MSEC_TO_COUNT(ms, clockFreqInHz) (uint64_t)((uint64_t)(ms) * (clockFreqInHz) / 1000U)
309#define COUNT_TO_MSEC(count, clockFreqInHz) (uint64_t)((uint64_t)(count)*1000U / (clockFreqInHz))
310/* @} */
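/*
 * Illustrative usage sketch for application code (the 24 MHz counter clock is
 * an assumed example value): converting a time interval to timer counts. The
 * division truncates, so very short intervals can round down to zero counts.
 */
static inline uint64_t Example_CountsFor10Ms(void)
{
    return MSEC_TO_COUNT(10U, 24000000U); /* 10 ms at 24 MHz -> 240000 counts */
}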
311
321#if (defined __CORTEX_M) && ((__CORTEX_M == 4U) || (__CORTEX_M == 7U))
322#define SDK_ISR_EXIT_BARRIER __DSB()
323#else
324#define SDK_ISR_EXIT_BARRIER
325#endif
326
327/* @} */
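/*
 * Illustrative usage sketch for application code (the handler name and the
 * flag-clearing step are hypothetical placeholders): SDK_ISR_EXIT_BARRIER is
 * intended as the last statement of an interrupt handler. On Cortex-M4/M7 it
 * expands to __DSB(), so buffered writes issued in the handler complete before
 * the exception return (a workaround for known Cortex-M4/M7 exception-return
 * errata); on other cores it expands to nothing.
 */
void EXAMPLE_DEMO_IRQHandler(void)
{
    /* ... acknowledge/clear the peripheral interrupt flag here ... */
    SDK_ISR_EXIT_BARRIER;
}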
328
330/* @{ */
331#if (defined(__ICCARM__))
332/*
333 * Workaround to suppress MISRA C messages for the IAR compiler.
334 * http://supp.iar.com/Support/?note=24725
335 */
336_Pragma("diag_suppress=Pm120")
337#define SDK_PRAGMA(x) _Pragma(#x)
338 _Pragma("diag_error=Pm120")
340#define SDK_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var
341#elif defined(__CC_ARM) || defined(__ARMCC_VERSION)
343#define SDK_ALIGN(var, alignbytes) __attribute__((aligned(alignbytes))) var
344#elif defined(__GNUC__)
346#define SDK_ALIGN(var, alignbytes) var __attribute__((aligned(alignbytes)))
347#else
348#error Toolchain not supported
349#endif
350
352#if defined(FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
353#define SDK_L1DCACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L1DCACHE_LINESIZE_BYTE)
354#endif
356#if defined(FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
357#define SDK_L2CACHE_ALIGN(var) SDK_ALIGN(var, FSL_FEATURE_L2CACHE_LINESIZE_BYTE)
358#endif
359
361#define SDK_SIZEALIGN(var, alignbytes) \
362 ((unsigned int)((var) + ((alignbytes)-1U)) & (unsigned int)(~(unsigned int)((alignbytes)-1U)))
363/* @} */
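/*
 * Illustrative usage sketch for application code (the buffer name is
 * hypothetical and the 32-byte alignment is an assumed cache-line size):
 * SDK_ALIGN aligns the start address of a definition, while SDK_SIZEALIGN
 * rounds a byte count up to the next multiple of the alignment, here 100 -> 128.
 */
SDK_ALIGN(static uint8_t s_exampleDmaBuffer[SDK_SIZEALIGN(100U, 32U)], 32);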
364
366/* For initialized, non-zero, non-cacheable variables, please use "AT_NONCACHEABLE_SECTION_INIT(var) = {xx};" or
367 * "AT_NONCACHEABLE_SECTION_ALIGN_INIT(var) = {xx};" in your projects to define them. For zero-initialized
368 * non-cacheable variables, please use "AT_NONCACHEABLE_SECTION(var);" or "AT_NONCACHEABLE_SECTION_ALIGN(var);"
369 * to define them; these variables are initialized to zero during system startup.
370 */
371/* @{ */
372
373#if ((!(defined(FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION) && FSL_FEATURE_HAS_NO_NONCACHEABLE_SECTION)) && \
374 defined(FSL_FEATURE_L1ICACHE_LINESIZE_BYTE))
375
376#if (defined(__ICCARM__))
377#define AT_NONCACHEABLE_SECTION(var) var @"NonCacheable"
378#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable"
379#define AT_NONCACHEABLE_SECTION_INIT(var) var @"NonCacheable.init"
380#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
381 SDK_PRAGMA(data_alignment = alignbytes) var @"NonCacheable.init"
382
383#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
384#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
385#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
386 __attribute__((section("NonCacheable.init"))) __attribute__((aligned(alignbytes))) var
387#if (defined(__CC_ARM))
388#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable"), zero_init)) var
389#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
390 __attribute__((section("NonCacheable"), zero_init)) __attribute__((aligned(alignbytes))) var
391#else
392#define AT_NONCACHEABLE_SECTION(var) __attribute__((section(".bss.NonCacheable"))) var
393#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
394 __attribute__((section(".bss.NonCacheable"))) __attribute__((aligned(alignbytes))) var
395#endif
396
397#elif (defined(__GNUC__))
398#if defined(__ARM_ARCH_8A__) /* This macro is ARMv8-A specific */
399#define __CS "//"
400#else
401#define __CS "@"
402#endif
403
404/* For GCC, when the non-cacheable section is required, please define "__STARTUP_INITIALIZE_NONCACHEDATA"
405 * in your project so that the non-cacheable section variables are initialized during system startup.
406 */
407#define AT_NONCACHEABLE_SECTION_INIT(var) __attribute__((section("NonCacheable.init"))) var
408#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) \
409 __attribute__((section("NonCacheable.init"))) var __attribute__((aligned(alignbytes)))
410#define AT_NONCACHEABLE_SECTION(var) __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var
411#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) \
412 __attribute__((section("NonCacheable,\"aw\",%nobits " __CS))) var __attribute__((aligned(alignbytes)))
413#else
414#error Toolchain not supported.
415#endif
416
417#else
418
419#define AT_NONCACHEABLE_SECTION(var) var
420#define AT_NONCACHEABLE_SECTION_ALIGN(var, alignbytes) SDK_ALIGN(var, alignbytes)
421#define AT_NONCACHEABLE_SECTION_INIT(var) var
422#define AT_NONCACHEABLE_SECTION_ALIGN_INIT(var, alignbytes) SDK_ALIGN(var, alignbytes)
423
424#endif
425
426/* @} */
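/*
 * Illustrative usage sketch for application code (the names are hypothetical
 * and the 64-byte alignment is an assumed cache-line size): buffers accessed by
 * DMA can be placed in the non-cacheable section, using the _INIT variants when
 * a non-zero initializer is required.
 */
AT_NONCACHEABLE_SECTION_ALIGN(static uint8_t s_exampleRxBuffer[128], 64);
AT_NONCACHEABLE_SECTION_INIT(static uint32_t s_exampleDmaMode) = 1UL;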
427
432#if (defined(__ICCARM__))
433#define AT_QUICKACCESS_SECTION_CODE(func) func @"CodeQuickAccess"
434#define AT_QUICKACCESS_SECTION_DATA(var) var @"DataQuickAccess"
435#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
436 SDK_PRAGMA(data_alignment = alignbytes) var @"DataQuickAccess"
437#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
438#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
439#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
440#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
441 __attribute__((section("DataQuickAccess"))) __attribute__((aligned(alignbytes))) var
442#elif (defined(__GNUC__))
443#define AT_QUICKACCESS_SECTION_CODE(func) __attribute__((section("CodeQuickAccess"), __noinline__)) func
444#define AT_QUICKACCESS_SECTION_DATA(var) __attribute__((section("DataQuickAccess"))) var
445#define AT_QUICKACCESS_SECTION_DATA_ALIGN(var, alignbytes) \
446 __attribute__((section("DataQuickAccess"))) var __attribute__((aligned(alignbytes)))
447#else
448#error Toolchain not supported.
449#endif /* defined(__ICCARM__) */
450
452#if (defined(__ICCARM__))
453#define RAMFUNCTION_SECTION_CODE(func) func @"RamFunction"
454#elif (defined(__CC_ARM) || defined(__ARMCC_VERSION))
455#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
456#elif (defined(__GNUC__))
457#define RAMFUNCTION_SECTION_CODE(func) __attribute__((section("RamFunction"))) func
458#else
459#error Toolchain not supported.
460#endif /* defined(__ICCARM__) */
461/* @} */
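/*
 * Illustrative usage sketch for application code (the function name is
 * hypothetical): code that must execute from RAM, for example while the flash
 * is being programmed, can be decorated with AT_QUICKACCESS_SECTION_CODE or
 * RAMFUNCTION_SECTION_CODE; the linker script must place these sections in RAM.
 */
AT_QUICKACCESS_SECTION_CODE(void EXAMPLE_FlashCriticalOp(uint32_t addr));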
462
463#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
464 void DefaultISR(void);
465#endif
466
467/*
468 * fsl_clock.h is included here because it needs MAKE_VERSION/MAKE_STATUS/status_t,
469 * which are defined earlier in this file.
470 */
471#include "fsl_clock.h"
472
473/*
474 * Chip-level peripheral reset API, for MCUs that implement peripheral reset control outside the peripheral itself
475 */
476#if ((defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0)) || \
477 (defined(FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT) && (FSL_FEATURE_SOC_ASYNC_SYSCON_COUNT > 0)))
478#include "fsl_reset.h"
479#endif
480
481/*******************************************************************************
482 * API
483 ******************************************************************************/
484
485#if defined(__cplusplus)
486extern "C" {
487#endif /* __cplusplus*/
488
505static inline status_t EnableIRQ(IRQn_Type interrupt)
506{
507 status_t status = kStatus_Success;
508
509 if (NotAvail_IRQn == interrupt)
510 {
511 status = kStatus_Fail;
512 }
513
514#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
515 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
516 {
517 status = kStatus_Fail;
518 }
519#endif
520
521 else
522 {
523#if defined(__GIC_PRIO_BITS)
524 GIC_EnableIRQ(interrupt);
525#else
526 NVIC_EnableIRQ(interrupt);
527#endif
528 }
529
530 return status;
531}
532
549static inline status_t DisableIRQ(IRQn_Type interrupt)
550{
551 status_t status = kStatus_Success;
552
553 if (NotAvail_IRQn == interrupt)
554 {
555 status = kStatus_Fail;
556 }
557
558#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
559 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
560 {
561 status = kStatus_Fail;
562 }
563#endif
564
565 else
566 {
567#if defined(__GIC_PRIO_BITS)
568 GIC_DisableIRQ(interrupt);
569#else
570 NVIC_DisableIRQ(interrupt);
571#endif
572 }
573
574 return status;
575}
576
577#if defined(__GIC_PRIO_BITS)
578#define NVIC_SetPriority(irq, prio) do {} while(0)
579#endif
580
598static inline status_t EnableIRQWithPriority(IRQn_Type interrupt, uint8_t priNum)
599{
600 status_t status = kStatus_Success;
601
602 if (NotAvail_IRQn == interrupt)
603 {
604 status = kStatus_Fail;
605 }
606
607#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
608 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
609 {
610 status = kStatus_Fail;
611 }
612#endif
613
614 else
615 {
616#if defined(__GIC_PRIO_BITS)
617 GIC_SetPriority(interrupt, priNum);
618 GIC_EnableIRQ(interrupt);
619#else
620 NVIC_SetPriority(interrupt, priNum);
621 NVIC_EnableIRQ(interrupt);
622#endif
623 }
624
625 return status;
626}
627
646static inline status_t IRQ_SetPriority(IRQn_Type interrupt, uint8_t priNum)
647{
648 status_t status = kStatus_Success;
649
650 if (NotAvail_IRQn == interrupt)
651 {
652 status = kStatus_Fail;
653 }
654
655#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
656 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
657 {
658 status = kStatus_Fail;
659 }
660#endif
661
662 else
663 {
664#if defined(__GIC_PRIO_BITS)
665 GIC_SetPriority(interrupt, priNum);
666#else
667 NVIC_SetPriority(interrupt, priNum);
668#endif
669 }
670
671 return status;
672}
673
691static inline status_t IRQ_ClearPendingIRQ(IRQn_Type interrupt)
692{
693 status_t status = kStatus_Success;
694
695 if (NotAvail_IRQn == interrupt)
696 {
697 status = kStatus_Fail;
698 }
699
700#if defined(FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS) && (FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS > 0)
701 else if ((int32_t)interrupt >= (int32_t)FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS)
702 {
703 status = kStatus_Fail;
704 }
705#endif
706
707 else
708 {
709#if defined(__GIC_PRIO_BITS)
710 GIC_ClearPendingIRQ(interrupt);
711#else
712 NVIC_ClearPendingIRQ(interrupt);
713#endif
714 }
715
716 return status;
717}
718
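/*
 * Illustrative usage sketch for application code (the helper name and the
 * priority value 3 are assumptions): typical bring-up of a device interrupt.
 * All of these helpers return kStatus_Fail for NotAvail_IRQn or for vectors
 * beyond FSL_FEATURE_NUMBER_OF_LEVEL1_INT_VECTORS.
 */
static inline status_t Example_SetupIrq(IRQn_Type irq)
{
    (void)IRQ_ClearPendingIRQ(irq);        /* drop any stale pending request */
    return EnableIRQWithPriority(irq, 3U); /* set priority, then enable */
}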
727static inline uint32_t DisableGlobalIRQ(void)
728{
729 uint32_t mask;
730
731#if defined(CPSR_I_Msk)
732 mask = __get_CPSR() & CPSR_I_Msk;
733#elif defined(DAIF_I_BIT)
734 mask = __get_DAIF() & DAIF_I_BIT;
735#else
736 mask = __get_PRIMASK();
737#endif
738    __disable_irq();
739
740 return mask;
741}
742
753static inline void EnableGlobalIRQ(uint32_t primask)
754{
755#if defined(CPSR_I_Msk)
756 __set_CPSR((__get_CPSR() & ~CPSR_I_Msk) | primask);
757#elif defined(DAIF_I_BIT)
758 if (0UL == primask)
759 {
760 __enable_irq();
761 }
762#else
763 __set_PRIMASK(primask);
764#endif
765}
766
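/*
 * Illustrative usage sketch for application code (the shared counter is
 * hypothetical): DisableGlobalIRQ() returns the previous interrupt mask so that
 * critical sections nest correctly; always hand that value back to
 * EnableGlobalIRQ() instead of unconditionally re-enabling interrupts.
 */
static inline void Example_IncrementShared(volatile uint32_t *counter)
{
    uint32_t primask = DisableGlobalIRQ(); /* enter critical section */
    (*counter)++;
    EnableGlobalIRQ(primask);              /* restore previous interrupt state */
}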
767#if defined(ENABLE_RAM_VECTOR_TABLE)
775uint32_t InstallIRQHandler(IRQn_Type irq, uint32_t irqHandler);
776#endif /* ENABLE_RAM_VECTOR_TABLE. */
777
778#if (defined(FSL_FEATURE_SOC_SYSCON_COUNT) && (FSL_FEATURE_SOC_SYSCON_COUNT > 0))
779
780/*
781 * When FSL_FEATURE_POWERLIB_EXTEND is defined to a non-zero value,
782 * powerlib should be used instead of these functions.
783 */
784#if !(defined(FSL_FEATURE_POWERLIB_EXTEND) && (FSL_FEATURE_POWERLIB_EXTEND != 0))
798void EnableDeepSleepIRQ(IRQn_Type interrupt);
799
813void DisableDeepSleepIRQ(IRQn_Type interrupt);
814#endif /* FSL_FEATURE_POWERLIB_EXTEND */
815#endif /* FSL_FEATURE_SOC_SYSCON_COUNT */
816
817#if defined(DWT)
821void MSDK_EnableCpuCycleCounter(void);
822
828uint32_t MSDK_GetCpuCycleCount(void);
829#endif
830
831#if defined(__cplusplus)
832}
833#endif /* __cplusplus*/
834
837#endif /* _FSL_COMMON_ARM_H_ */