RTEMS 6.1-rc6
Loading...
Searching...
No Matches
powerpc-utility.h
Go to the documentation of this file.
1/* SPDX-License-Identifier: BSD-2-Clause */
2
12/*
13 * Copyright (C) 2008, 2015 embedded brains GmbH & Co. KG
14 *
15 * access function for Device Control Registers inspired by "ppc405common.h"
16 * from Michael Hamel ADInstruments May 2008
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
31 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
50#ifndef __LIBCPU_POWERPC_UTILITY_H
51#define __LIBCPU_POWERPC_UTILITY_H
52
53#if !defined(ASM)
54 #include <rtems.h>
55#endif
56
57#include <rtems/score/cpu.h>
60
61#ifdef __cplusplus
62extern "C" {
63#endif
64
65#if !defined(ASM)
66
67#include <rtems/bspIo.h>
68
69#include <libcpu/cpuIdent.h>
70
/* Declares a linker-script-provided symbol; only the address of sym is
 * meaningful, never its contents. */
#define LINKER_SYMBOL(sym) extern char sym [];
72
/**
 * @brief Loads one byte from @a src with an explicit lbz instruction.
 *
 * The volatile asm guarantees the access is performed exactly once and is
 * not reordered by the compiler.  The "b" constraint keeps r0 out of the
 * base register slot, since 0(r0) means a literal zero base on PowerPC.
 */
static inline uint8_t ppc_read_byte(const volatile void *src)
{
  uint8_t value;

  __asm__ volatile (
    "lbz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Loads a 16-bit half word from @a src (lhz, zero-extended).
 */
static inline uint16_t ppc_read_half_word(const volatile void *src)
{
  uint16_t value;

  __asm__ volatile (
    "lhz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}

/**
 * @brief Loads a 32-bit word from @a src (lwz).
 */
static inline uint32_t ppc_read_word(const volatile void *src)
{
  uint32_t value;

  __asm__ volatile (
    "lwz %0, 0(%1)"
    : "=r" (value)
    : "b" (src)
  );

  return value;
}
120
/**
 * @brief Stores the byte @a value to @a dest with an explicit stb.
 *
 * Volatile asm: the store is performed exactly once, in program order.
 */
static inline void ppc_write_byte(uint8_t value, volatile void *dest)
{
  __asm__ volatile (
    "stb %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Stores the 16-bit @a value to @a dest (sth).
 */
static inline void ppc_write_half_word(uint16_t value, volatile void *dest)
{
  __asm__ volatile (
    "sth %0, 0(%1)"
    :
    : "r" (value), "b" (dest)
  );
}

/**
 * @brief Stores the 32-bit @a value to @a dest (stw).
 */
static inline void ppc_write_word(uint32_t value, volatile void *dest)
{
  __asm__ volatile (
    "stw %0, 0(%1)" :
    : "r" (value), "b" (dest)
  );
}
155
156
/* Returns the current stack pointer (GPR 1 by PowerPC EABI convention). */
static inline void *ppc_stack_pointer(void)
{
  void *sp;

  __asm__ volatile (
    "mr %0, 1"
    : "=r" (sp)
  );

  return sp;
}

/* Sets the stack pointer (GPR 1).  Extremely dangerous outside of context
 * switch or startup code: local variables of the caller may already live
 * relative to the old stack. */
static inline void ppc_set_stack_pointer(void *sp)
{
  __asm__ volatile (
    "mr 1, %0"
    :
    : "r" (sp)
  );
}
177
/* Returns the current link register (LR) value via mflr. */
static inline void *ppc_link_register(void)
{
  void *lr;

  __asm__ volatile (
    "mflr %0"
    : "=r" (lr)
  );

  return lr;
}

/* Sets the link register (LR) via mtlr. */
static inline void ppc_set_link_register(void *lr)
{
  __asm__ volatile (
    "mtlr %0"
    :
    : "r" (lr)
  );
}
198
/* Returns the Machine State Register (MSR) via mfmsr (supervisor mode). */
static inline uint32_t ppc_machine_state_register(void)
{
  uint32_t msr;

  __asm__ volatile (
    "mfmsr %0"
    : "=r" (msr)
  );

  return msr;
}

/* Writes the Machine State Register (MSR) via mtmsr (supervisor mode). */
static inline void ppc_set_machine_state_register(uint32_t msr)
{
  __asm__ volatile (
    "mtmsr %0"
    :
    : "r" (msr)
  );
}
219
/**
 * @brief Data synchronization barrier (sync).
 *
 * The "memory" clobber makes the asm a compiler memory barrier as well;
 * without it the compiler is free to move loads and stores across the
 * hardware barrier, which would defeat its purpose.  (The original source
 * pairs the asm with RTEMS_COMPILER_MEMORY_BARRIER() for the same reason.)
 */
static inline void ppc_synchronize_data(void)
{
  __asm__ volatile ("sync" : : : "memory");
}

/**
 * @brief Light-weight synchronization barrier (lwsync), a compiler memory
 * barrier as well (see ppc_synchronize_data()).
 */
static inline void ppc_light_weight_synchronize(void)
{
  __asm__ volatile ("lwsync" : : : "memory");
}

/**
 * @brief Instruction synchronization barrier (isync), a compiler memory
 * barrier as well (see ppc_synchronize_data()).
 */
static inline void ppc_synchronize_instructions(void)
{
  __asm__ volatile ("isync" : : : "memory");
}
240
/* Issues an eieio (enforce in-order execution of I/O).  The .machine
 * push/any/pop wrapper makes the mnemonic acceptable even when the
 * assembler targets a CPU variant that would otherwise reject it. */
static inline void ppc_enforce_in_order_execution_of_io(void)
{
  __asm__ volatile (
    ".machine \"push\"\n"
    ".machine \"any\"\n"
    "eieio\n"
    ".machine \"pop\""
  );
}
252
/* Flushes (writes back and invalidates) the data cache block containing
 * @a addr (dcbf).  The "memory" clobber prevents the compiler from caching
 * values of the affected memory across the call. */
static inline void ppc_data_cache_block_flush(void *addr)
{
  __asm__ volatile (
    "dcbf 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/* dcbf with a base + offset effective address. */
static inline void ppc_data_cache_block_flush_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbf %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

/* Invalidates the data cache block containing @a addr (dcbi); modified
 * data in the block is discarded. */
static inline void ppc_data_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "dcbi 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/* dcbi with a base + offset effective address. */
static inline void ppc_data_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbi %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}
298
/* Writes back (without invalidating) the data cache block containing
 * @a addr (dcbst). */
static inline void ppc_data_cache_block_store(const void *addr)
{
  __asm__ volatile (
    "dcbst 0, %0"
    :
    : "r" (addr)
  );
}

/* dcbst with a base + offset effective address. */
static inline void ppc_data_cache_block_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

/* Prefetch hint for a load from the block containing @a addr (dcbt). */
static inline void ppc_data_cache_block_touch(const void *addr)
{
  __asm__ volatile (
    "dcbt 0, %0"
    :
    : "r" (addr)
  );
}

/* dcbt with a base + offset effective address. */
static inline void ppc_data_cache_block_touch_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbt %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}

/* Prefetch hint for a store to the block containing @a addr (dcbtst). */
static inline void ppc_data_cache_block_touch_for_store(const void *addr)
{
  __asm__ volatile (
    "dcbtst 0, %0"
    :
    : "r" (addr)
  );
}

/* dcbtst with a base + offset effective address. */
static inline void ppc_data_cache_block_touch_for_store_2(
  const void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbtst %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}
361
/* Zeroes the whole data cache block containing @a addr (dcbz).  "memory"
 * clobber: the instruction modifies memory behind the compiler's back. */
static inline void ppc_data_cache_block_clear_to_zero(void *addr)
{
  __asm__ volatile (
    "dcbz 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}

/* dcbz with a base + offset effective address. */
static inline void ppc_data_cache_block_clear_to_zero_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "dcbz %0, %1"
    :
    : "b" (base), "r" (offset)
    : "memory"
  );
}

/* Invalidates the instruction cache block containing @a addr (icbi). */
static inline void ppc_instruction_cache_block_invalidate(void *addr)
{
  __asm__ volatile (
    "icbi 0, %0"
    :
    : "r" (addr)
  );
}

/* icbi with a base + offset effective address. */
static inline void ppc_instruction_cache_block_invalidate_2(
  void *base,
  uintptr_t offset
)
{
  __asm__ volatile (
    "icbi %0, %1"
    :
    : "b" (base), "r" (offset)
  );
}
405
/**
 * @brief Enables external exceptions (sets MSR[EE]).
 *
 * You can use this function to enable the external exceptions and restore
 * the machine state with ppc_external_exceptions_disable() later.
 *
 * The "memory" clobber makes the MSR update a compiler memory barrier, so
 * memory accesses are not moved into or out of the protected region (the
 * original source uses RTEMS_COMPILER_MEMORY_BARRIER() here).
 *
 * @return The previous MSR value, to be passed to
 *   ppc_external_exceptions_disable().
 */
static inline uint32_t ppc_external_exceptions_enable(void)
{
  uint32_t current_msr;
  uint32_t new_msr;

  __asm__ volatile (
    "mfmsr %0;"
    "ori %1, %0, 0x8000;"
    "mtmsr %1"
    : "=r" (current_msr), "=r" (new_msr)
    :
    : "memory"
  );

  return current_msr;
}

/**
 * @brief Restores the machine state saved by
 * ppc_external_exceptions_enable().
 *
 * The trailing empty asm is a compiler memory barrier matching the one in
 * ppc_external_exceptions_enable().
 */
static inline void ppc_external_exceptions_disable(uint32_t msr)
{
  ppc_set_machine_state_register(msr);

  __asm__ volatile ("" : : : "memory");
}
440
/* Returns the number of leading zero bits in @a value (cntlzw, 0..32).
 * Not volatile: the asm is a pure function of its input and may be CSEd. */
static inline uint32_t ppc_count_leading_zeros(uint32_t value)
{
  uint32_t count;

  __asm__ (
    "cntlzw %0, %1;"
    : "=r" (count)
    : "r" (value)
  );

  return count;
}
453
/*
 * Simple spin delay in microsecond units for device drivers.
 * This is very dependent on the clock speed of the target.
 */

#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* Wonderful bookE doesn't have mftb/mftbu; they only
 * define the TBRU/TBRL SPRs so we use these. Luckily,
 * we run in supervisory mode so that should work on
 * all CPUs. In user mode we'd have a problem...
 * 2007/11/30, T.S.
 *
 * OTOH, PSIM currently lacks support for reading
 * SPRs 268/269. You need GDB patch sim/2376 to avoid
 * a crash...
 * OTOH, the MPC8xx do not allow to read the timebase registers via mfspr.
 * we NEED a mftb to access the time base.
 * 2009/10/30 Th. D.
 */
/* MPC8xx family: read the lower time base with the dedicated mftb. */
#define CPU_Get_timebase_low( _value ) \
  __asm__ volatile( "mftb %0" : "=r" (_value) )
#else
/* Everything else: read the lower time base via SPR 268 (TBRL). */
#define CPU_Get_timebase_low( _value ) \
  __asm__ volatile( "mfspr %0,268" : "=r" (_value) )
#endif
479
/* Must be provided for rtems_bsp_delay to work */
extern uint32_t bsp_clicks_per_usec;

/* Busy-waits for at least _microseconds by spinning on the time base.
 * The (now - start) difference is wrap-safe in unsigned arithmetic. */
#define rtems_bsp_delay( _microseconds ) \
  do { \
    uint32_t start, ticks, now; \
    CPU_Get_timebase_low( start ) ; \
    ticks = (_microseconds) * bsp_clicks_per_usec; \
    do \
      CPU_Get_timebase_low( now ) ; \
    while (now - start < ticks); \
  } while (0)

/* Busy-waits for _cycles ticks of the time base. */
#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
  do { \
    uint32_t start, now; \
    CPU_Get_timebase_low( start ); \
    do \
      CPU_Get_timebase_low( now ); \
    while (now - start < (_cycles)); \
  } while (0)
501
/*
 * Routines to access the decrementer register
 */

/* Writes the decrementer (mtdec); triggers an exception on underflow. */
#define PPC_Set_decrementer( _clicks ) \
  do { \
    __asm__ volatile( "mtdec %0" : : "r" ((_clicks)) ); \
  } while (0)

/* Reads the decrementer (mfdec) into _clicks. */
#define PPC_Get_decrementer( _clicks ) \
  __asm__ volatile( "mfdec %0" : "=r" (_clicks) )
513
/*
 * Routines to access the time base register
 */

static inline uint64_t PPC_Get_timebase_register( void )
{
  uint32_t tbr_low;
  uint32_t tbr_high;
  uint32_t tbr_high_old;
  uint64_t tbr;

  /* Re-read until the upper half is stable so the low/high pair is a
   * consistent snapshot even if the low word wraps between the reads. */
  do {
#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
/* See comment above (CPU_Get_timebase_low) */
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high_old));
    __asm__ volatile( "mftb %0" : "=r" (tbr_low));
    __asm__ volatile( "mftbu %0" : "=r" (tbr_high));
#else
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high_old));
    __asm__ volatile( "mfspr %0, 268" : "=r" (tbr_low));
    __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high));
#endif
  } while ( tbr_high_old != tbr_high );

  tbr = tbr_high;
  tbr <<= 32;
  tbr |= tbr_low;
  return tbr;
}

/* Writes the 64-bit time base via SPRs 284/285 (TBWL/TBWU). */
static inline void PPC_Set_timebase_register (uint64_t tbr)
{
  uint32_t tbr_low;
  uint32_t tbr_high;

  tbr_low = (uint32_t) tbr;
  tbr_high = (uint32_t) (tbr >> 32);
  __asm__ volatile( "mtspr 284, %0" : : "r" (tbr_low));
  __asm__ volatile( "mtspr 285, %0" : : "r" (tbr_high));

}
555
/* Inline-function wrapper around PPC_Get_decrementer. */
static inline uint32_t ppc_decrementer_register(void)
{
  uint32_t dec;

  PPC_Get_decrementer(dec);

  return dec;
}

/* Inline-function wrapper around PPC_Set_decrementer. */
static inline void ppc_set_decrementer_register(uint32_t dec)
{
  PPC_Set_decrementer(dec);
}
569
/* Stringizes the macro argument (used to splice SPR/TMR/DCR numbers into
 * asm templates). */
#define PPC_STRINGOF(x) #x

/* Reads the Special Purpose Register with number spr into val.
 * spr must be a compile-time SPR name or number (it is stringized). */
#define PPC_SPECIAL_PURPOSE_REGISTER(spr, val) \
  __asm__ volatile (\
    "mfspr %0, " PPC_STRINGOF(spr) \
    : "=r" (val) \
  )

/* Writes val to the Special Purpose Register with number spr. */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val) \
  do { \
    __asm__ volatile (\
      "mtspr " PPC_STRINGOF(spr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)
598
/* Read-modify-write: sets the given bits in SPR spr.  The local interrupt
 * disable makes the RMW sequence atomic with respect to interrupts on the
 * executing processor. */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/* Read-modify-write: clears the mask bits, then sets the given bits in
 * SPR spr, atomically with respect to local interrupts. */
#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED(spr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/* Read-modify-write: clears the given bits in SPR spr, atomically with
 * respect to local interrupts. */
#define PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
    val &= ~mybits; \
    PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
    _ISR_Local_enable(level); \
  } while (0)
655
/* Reads the Thread Management Register with number tmr (mftmr); evaluates
 * to the register value (GNU statement expression). */
#define PPC_THREAD_MGMT_REGISTER(tmr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mftmr %0, " PPC_STRINGOF(tmr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/* Writes val to the Thread Management Register with number tmr (mttmr). */
#define PPC_SET_THREAD_MGMT_REGISTER(tmr, val) \
  do { \
    __asm__ volatile (\
      "mttmr " PPC_STRINGOF(tmr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)
683
/* Reads the Device Control Register with number dcr (mfdcr, PPC4xx);
 * evaluates to the register value (GNU statement expression). */
#define PPC_DEVICE_CONTROL_REGISTER(dcr) \
  ({ \
    uint32_t val; \
    __asm__ volatile (\
      "mfdcr %0, " PPC_STRINGOF(dcr) \
      : "=r" (val) \
    ); \
    val;\
  } )

/* Writes val to the Device Control Register with number dcr (mtdcr). */
#define PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val) \
  do { \
    __asm__ volatile (\
      "mtdcr " PPC_STRINGOF(dcr) ", %0" \
      : \
      : "r" (val) \
    ); \
  } while (0)
715
/* Read-modify-write: sets the given bits in DCR dcr, atomically with
 * respect to local interrupts. */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/* Read-modify-write: clears the mask bits, then sets the given bits in
 * DCR dcr, atomically with respect to local interrupts. */
#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS_MASKED(dcr, bits, mask) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    uint32_t mymask = mask; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mymask; \
    val |= mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)

/* Read-modify-write: clears the given bits in DCR dcr, atomically with
 * respect to local interrupts. */
#define PPC_CLEAR_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
  do { \
    ISR_Level level; \
    uint32_t val; \
    uint32_t mybits = bits; \
    _ISR_Local_disable(level); \
    val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
    val &= ~mybits; \
    PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
    _ISR_Local_enable(level); \
  } while (0)
772
/* Returns the lower 32 bits of the time base. */
static inline uint32_t ppc_time_base(void)
{
  uint32_t val;

  CPU_Get_timebase_low(val);

  return val;
}
781
/* Writes the lower 32 bits of the time base via SPR 284 (TBWL), matching
 * PPC_Set_timebase_register().  The rendered original lost this body,
 * leaving the function a silent no-op. */
static inline void ppc_set_time_base(uint32_t val)
{
  __asm__ volatile ("mtspr 284, %0" : : "r" (val));
}
786
/* Returns the upper 32 bits of the time base.  The rendered original lost
 * the read, returning an uninitialized value (undefined behavior); the
 * read is restored here, mirroring PPC_Get_timebase_register(). */
static inline uint32_t ppc_time_base_upper(void)
{
  uint32_t val;

#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
  /* MPC8xx cannot read the time base via mfspr, see CPU_Get_timebase_low */
  __asm__ volatile ("mftbu %0" : "=r" (val));
#else
  __asm__ volatile ("mfspr %0, 269" : "=r" (val)); /* TBRU */
#endif

  return val;
}
793
/* Writes the upper 32 bits of the time base via SPR 285 (TBWU), matching
 * PPC_Set_timebase_register().  The rendered original lost this body,
 * leaving the function a silent no-op. */
static inline void ppc_set_time_base_upper(uint32_t val)
{
  __asm__ volatile ("mtspr 285, %0" : : "r" (val));
}
798
/* Returns the full 64-bit time base (consistent snapshot). */
static inline uint64_t ppc_time_base_64(void)
{
  return PPC_Get_timebase_register();
}

/* Writes the full 64-bit time base. */
static inline void ppc_set_time_base_64(uint64_t val)
{
  PPC_Set_timebase_register(val);
}
808
/* Returns the lower half of the Freescale alternate time base (ATBL). */
static inline uint32_t ppc_alternate_time_base(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBL, val);
  return val;
}

/* Returns the upper half of the Freescale alternate time base (ATBU). */
static inline uint32_t ppc_alternate_time_base_upper(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBU, val);
  return val;
}

/* Returns the 64-bit alternate time base; re-reads until the upper half
 * is stable so the two halves form a consistent snapshot. */
static inline uint64_t ppc_alternate_time_base_64(void)
{
  uint32_t atbl;
  uint32_t atbu_0;
  uint32_t atbu_1;

  do {
    atbu_0 = ppc_alternate_time_base_upper();
    atbl = ppc_alternate_time_base();
    atbu_1 = ppc_alternate_time_base_upper();
  } while (atbu_0 != atbu_1);

  return (((uint64_t) atbu_1) << 32) | ((uint64_t) atbl);
}
837
/* Returns the Book E Processor ID Register (PIR). */
static inline uint32_t ppc_processor_id(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
  return val;
}

/* Writes the Book E Processor ID Register (PIR). */
static inline void ppc_set_processor_id(uint32_t val)
{
  PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
}

/* Returns the Freescale System Version Register (SVR). */
static inline uint32_t ppc_fsl_system_version(void)
{
  uint32_t val;
  PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_SVR, val);
  return val;
}
856
/* Field accessors for the Freescale System Version Register (SVR) value:
 * from the most significant nibble down — company ID, 12-bit system ID,
 * process ID, manufacturing ID, major revision, minor revision. */

static inline uint32_t ppc_fsl_system_version_cid(uint32_t svr)
{
  /* Company ID: most significant nibble */
  return (svr & UINT32_C(0xf0000000)) >> 28;
}

static inline uint32_t ppc_fsl_system_version_sid(uint32_t svr)
{
  /* System ID: twelve bits below the company ID */
  return (svr & UINT32_C(0x0fff0000)) >> 16;
}

static inline uint32_t ppc_fsl_system_version_proc(uint32_t svr)
{
  /* Process ID nibble */
  return (svr & UINT32_C(0x0000f000)) >> 12;
}

static inline uint32_t ppc_fsl_system_version_mfg(uint32_t svr)
{
  /* Manufacturing ID nibble */
  return (svr & UINT32_C(0x00000f00)) >> 8;
}

static inline uint32_t ppc_fsl_system_version_mjrev(uint32_t svr)
{
  /* Major revision nibble */
  return (svr & UINT32_C(0x000000f0)) >> 4;
}

static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
{
  /* Minor revision nibble */
  return svr & UINT32_C(0x0000000f);
}
886
/* Memory synchronize (msync, Book E form of sync); also a compiler memory
 * barrier via the "memory" clobber. */
static inline void ppc_msync(void)
{
  __asm__ volatile (
    "msync"
    :
    :
    : "memory"
  );
}

/* TLB read entry (tlbre); operands are taken from the MAS registers. */
static inline void ppc_tlbre(void)
{
  __asm__ volatile (
    "tlbre"
    :
    :
    : "memory"
  );
}

/* TLB write entry (tlbwe); operands are taken from the MAS registers. */
static inline void ppc_tlbwe(void)
{
  __asm__ volatile (
    "tlbwe"
    :
    :
    : "memory"
  );
}

/* TLB search indexed (tlbsx) for the effective address @a addr. */
static inline void ppc_tlbsx(void *addr)
{
  __asm__ volatile (
    "tlbsx 0, %0"
    :
    : "r" (addr)
    : "memory"
  );
}
926
/* Writes the Interrupt Vector Prefix Register (IVPR). */
static inline void ppc_mtivpr(void *prefix)
{
  __asm__ volatile (
    "mtivpr %0"
    :
    : "r" (prefix)
  );
}

/* Writes Interrupt Vector Offset Register x (mtivorX); x must be a
 * compile-time token since it is pasted into the mnemonic. */
#define ppc_mtivor(x, vec) __asm__ volatile ( \
  "mtivor" RTEMS_XSTRING(x) " %0" \
  : \
  : "r" (vec) \
  )
941
942void ppc_code_copy(void *dest, const void *src, size_t n);
943
944/* FIXME: Do not use this function */
945void printBAT(int bat, uint32_t upper, uint32_t lower);
946
947/* FIXME: Do not use this function */
948void ShowBATS(void);
949
950#endif /* ifndef ASM */
951
952#if defined(ASM)
953#include <rtems/asm.h>
954
/* Loads the address \addr into \reg (64-bit five-instruction sequence on
 * powerpc64, lis/ori pair otherwise). */
.macro LA reg, addr
#if defined(__powerpc64__)
	lis	\reg, (\addr)@highest
	ori	\reg, \reg, (\addr)@higher
	rldicr	\reg, \reg, 32, 31
	oris	\reg, \reg, (\addr)@h
	ori	\reg, \reg, (\addr)@l
#else
	lis	\reg, (\addr)@h
	ori	\reg, \reg, (\addr)@l
#endif
.endm

/* Loads the 32-bit address \addr into \reg (always lis/ori). */
.macro LA32 reg, addr
	lis	\reg, (\addr)@h
	ori	\reg, \reg, (\addr)@l
.endm

/* Loads the 32-bit immediate \value into \reg. */
.macro LWI reg, value
	lis	\reg, (\value)@h
	ori	\reg, \reg, (\value)@l
.endm

/* Loads the word stored at address \addr into \reg (clobbers \reg as the
 * temporary base register). */
.macro LW reg, addr
	lis	\reg, \addr@ha
	lwz	\reg, \addr@l(\reg)
.endm
982
/*
 * Tests the bits in reg1 against the bits set in mask. A match is indicated
 * by EQ = 0 in CR0. A mismatch is indicated by EQ = 1 in CR0. The register
 * reg2 is used to load the mask.
 */
.macro TSTBITS reg1, reg2, mask
	LWI	\reg2, \mask
	and	\reg1, \reg1, \reg2
	cmplw	\reg1, \reg2
.endm

/* Sets the bits of \mask in \reg1; \reg2 is clobbered as a scratch. */
.macro SETBITS reg1, reg2, mask
	LWI	\reg2, \mask
	or	\reg1, \reg1, \reg2
.endm

/* Clears the bits of \mask in \reg1; \reg2 is clobbered as a scratch. */
.macro CLRBITS reg1, reg2, mask
	LWI	\reg2, \mask
	andc	\reg1, \reg1, \reg2
.endm
1003
/* Emits a global function label with ELF function type information. */
.macro GLOBAL_FUNCTION name
	.global \name
	.type \name, @function
\name:
.endm

/*
 * Obtain interrupt mask
 */
.macro GET_INTERRUPT_MASK mask
	lis	\mask, _PPC_INTERRUPT_DISABLE_MASK@h
	ori	\mask, \mask, _PPC_INTERRUPT_DISABLE_MASK@l
.endm
1017
/*
 * Disables all asynchronous exeptions (interrupts) which may cause a context
 * switch.  The previous MSR is left in \level for INTERRUPT_ENABLE;
 * \mask is clobbered as a scratch register.
 */
.macro INTERRUPT_DISABLE level, mask
	mfmsr	\level
	GET_INTERRUPT_MASK mask=\mask
	andc	\mask, \level, \mask
	mtmsr	\mask
.endm

/*
 * Restore previous machine state.
 */
.macro INTERRUPT_ENABLE level
	mtmsr	\level
.endm
1035
/* SMP: computes this processor's Per_CPU_Control address from the Book E
 * PIR (SPR 286) and caches it in the dedicated per-CPU SPR; \reg_0 and
 * \reg_1 are clobbered.  No-op on uniprocessor configurations. */
.macro SET_SELF_CPU_CONTROL reg_0, reg_1
#if defined(RTEMS_SMP)
	/* Use Book E Processor ID Register (PIR) */
	mfspr	\reg_0, 286
	slwi	\reg_0, \reg_0, PER_CPU_CONTROL_SIZE_LOG2
#if defined(__powerpc64__)
	LA	\reg_1, _Per_CPU_Information
	add	\reg_0, \reg_0, \reg_1
#else
	addis	\reg_0, \reg_0, _Per_CPU_Information@ha
	addi	\reg_0, \reg_0, _Per_CPU_Information@l
#endif
	mtspr	PPC_PER_CPU_CONTROL_REGISTER, \reg_0
#endif
.endm

/* Loads this processor's Per_CPU_Control address into \reg: from the
 * per-CPU SPR on SMP, or the single static instance otherwise. */
.macro GET_SELF_CPU_CONTROL reg
#if defined(RTEMS_SMP)
	mfspr	\reg, PPC_PER_CPU_CONTROL_REGISTER
#else
	lis	\reg, _Per_CPU_Information@h
	ori	\reg, \reg, _Per_CPU_Information@l
#endif
.endm
1060
/* Word-size-dispatching logical shift right by immediate (srdi/srwi). */
.macro SHIFT_RIGHT_IMMEDIATE rd, rs, imm
#if defined(__powerpc64__)
	srdi	\rd, \rs, \imm
#else
	srwi	\rd, \rs, \imm
#endif
.endm

/* Word-size-dispatching unsigned compare (cmpld/cmplw). */
.macro COMPARE_LOGICAL cr, ra, rb
#if defined(__powerpc64__)
	cmpld	\cr, \ra, \rb
#else
	cmplw	\cr, \ra, \rb
#endif
.endm

/* Word-size-dispatching clear of the low \imm bits (clrrdi/clrrwi). */
.macro CLEAR_RIGHT_IMMEDIATE rd, rs, imm
#if defined(__powerpc64__)
	clrrdi	\rd, \rs, \imm
#else
	clrrwi	\rd, \rs, \imm
#endif
.endm
1084
1085#define LINKER_SYMBOL(sym) .extern sym
1086
1087#endif /* ASM */
1088
1089#ifdef __cplusplus
1090}
1091#endif
1092
1095#endif /* __LIBCPU_POWERPC_UTILITY_H */
This header file provides the kernel character input/output support API.
#define RTEMS_COMPILER_MEMORY_BARRIER()
This macro forbids the compiler to reorder read and write commands around it.
Definition: basedefs.h:258
uint32_t bsp_clicks_per_usec
Time base clicks per micro second.
Definition: bspstart.c:99
#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val)
Sets the Special Purpose Register with number spr to the value in val.
Definition: powerpc-utility.h:590
#define PPC_SPECIAL_PURPOSE_REGISTER(spr, val)
Returns the value of the Special Purpose Register with number spr.
Definition: powerpc-utility.h:580
CPU_STRUCTURE_ALIGNMENT Per_CPU_Control_envelope _Per_CPU_Information[]
Set of Per CPU Core Information.
Definition: asm.h:171
#define ra
return address *‍/
Definition: regs.h:66
#define sp
stack-pointer *‍/
Definition: regs.h:64
char _PPC_INTERRUPT_DISABLE_MASK[]
A global symbol used to disable interrupts in the MSR.
This header file defines the RTEMS Classic API.