RTEMS 6.1-rc1
powerpc-utility.h
1/* SPDX-License-Identifier: BSD-2-Clause */
2
12/*
13 * Copyright (C) 2008, 2015 embedded brains GmbH & Co. KG
14 *
15 * access function for Device Control Registers inspired by "ppc405common.h"
16 * from Michael Hamel ADInstruments May 2008
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 * notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 * notice, this list of conditions and the following disclaimer in the
25 * documentation and/or other materials provided with the distribution.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
31 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
50#ifndef __LIBCPU_POWERPC_UTILITY_H
51#define __LIBCPU_POWERPC_UTILITY_H
52
53#if !defined(ASM)
54 #include <rtems.h>
55#endif
56
57#include <rtems/score/cpu.h>
60
61#ifdef __cplusplus
62extern "C" {
63#endif
64
65#if !defined(ASM)
66
67#include <rtems/bspIo.h>
68
69#include <libcpu/cpuIdent.h>
70
71#define LINKER_SYMBOL(sym) extern char sym [];
72
76static inline uint8_t ppc_read_byte(const volatile void *src)
77{
78 uint8_t value;
79
80 __asm__ volatile (
81 "lbz %0, 0(%1)"
82 : "=r" (value)
83 : "b" (src)
84 );
85
86 return value;
87}
88
92static inline uint16_t ppc_read_half_word(const volatile void *src)
93{
94 uint16_t value;
95
96 __asm__ volatile (
97 "lhz %0, 0(%1)"
98 : "=r" (value)
99 : "b" (src)
100 );
101
102 return value;
103}
104
108static inline uint32_t ppc_read_word(const volatile void *src)
109{
110 uint32_t value;
111
112 __asm__ volatile (
113 "lwz %0, 0(%1)"
114 : "=r" (value)
115 : "b" (src)
116 );
117
118 return value;
119}
120
124static inline void ppc_write_byte(uint8_t value, volatile void *dest)
125{
126 __asm__ volatile (
127 "stb %0, 0(%1)"
128 :
129 : "r" (value), "b" (dest)
130 );
131}
132
136static inline void ppc_write_half_word(uint16_t value, volatile void *dest)
137{
138 __asm__ volatile (
139 "sth %0, 0(%1)"
140 :
141 : "r" (value), "b" (dest)
142 );
143}
144
148static inline void ppc_write_word(uint32_t value, volatile void *dest)
149{
150 __asm__ volatile (
151 "stw %0, 0(%1)" :
152 : "r" (value), "b" (dest)
153 );
154}
155
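The accessors above each issue exactly one lbz/lhz/lwz or stb/sth/stw through inline assembly, so device registers are read and written with the intended width and the compiler cannot merge or reorder the accesses. A minimal usage sketch follows; the UART base address, register offsets, and status bit are hypothetical and not provided by this header.

#include <libcpu/powerpc-utility.h>

#define UART_BASE 0xe0004500UL      /* assumed device base address */
#define UART_TX_READY 0x01U         /* assumed status bit */

static void uart_putc(char c)
{
  volatile uint8_t *status = (volatile uint8_t *) (UART_BASE + 0x0);
  volatile uint8_t *data = (volatile uint8_t *) (UART_BASE + 0x4);

  /* Poll the assumed status register until the transmitter is ready */
  while ((ppc_read_byte(status) & UART_TX_READY) == 0) {
    /* busy wait */
  }

  ppc_write_byte((uint8_t) c, data);
}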
156
157static inline void *ppc_stack_pointer(void)
158{
159 void *sp;
160
161 __asm__ volatile (
162 "mr %0, 1"
163 : "=r" (sp)
164 );
165
166 return sp;
167}
168
169static inline void ppc_set_stack_pointer(void *sp)
170{
171 __asm__ volatile (
172 "mr 1, %0"
173 :
174 : "r" (sp)
175 );
176}
177
178static inline void *ppc_link_register(void)
179{
180 void *lr;
181
182 __asm__ volatile (
183 "mflr %0"
184 : "=r" (lr)
185 );
186
187 return lr;
188}
189
190static inline void ppc_set_link_register(void *lr)
191{
192 __asm__ volatile (
193 "mtlr %0"
194 :
195 : "r" (lr)
196 );
197}
198
199static inline uint32_t ppc_machine_state_register(void)
200{
201 uint32_t msr;
202
203 __asm__ volatile (
204 "mfmsr %0"
205 : "=r" (msr)
206 );
207
208 return msr;
209}
210
211static inline void ppc_set_machine_state_register(uint32_t msr)
212{
213 __asm__ volatile (
214 "mtmsr %0"
215 :
216 : "r" (msr)
217 );
218}
219
220static inline void ppc_synchronize_data(void)
221{
222 RTEMS_COMPILER_MEMORY_BARRIER();
223
224 __asm__ volatile ("sync");
225}
226
227static inline void ppc_light_weight_synchronize(void)
228{
229 RTEMS_COMPILER_MEMORY_BARRIER();
230
231 __asm__ volatile ("lwsync");
232}
233
234static inline void ppc_synchronize_instructions(void)
235{
236 RTEMS_COMPILER_MEMORY_BARRIER();
237
238 __asm__ volatile ("isync");
239}
240
241static inline void ppc_enforce_in_order_execution_of_io(void)
242{
243 RTEMS_COMPILER_MEMORY_BARRIER();
244
245 __asm__ volatile (
246 ".machine \"push\"\n"
247 ".machine \"any\"\n"
248 "eieio\n"
249 ".machine \"pop\""
250 );
251}
252
253static inline void ppc_data_cache_block_flush(void *addr)
254{
255 __asm__ volatile (
256 "dcbf 0, %0"
257 :
258 : "r" (addr)
259 : "memory"
260 );
261}
262
263static inline void ppc_data_cache_block_flush_2(
264 void *base,
265 uintptr_t offset
266)
267{
268 __asm__ volatile (
269 "dcbf %0, %1"
270 :
271 : "b" (base), "r" (offset)
272 : "memory"
273 );
274}
275
276static inline void ppc_data_cache_block_invalidate(void *addr)
277{
278 __asm__ volatile (
279 "dcbi 0, %0"
280 :
281 : "r" (addr)
282 : "memory"
283 );
284}
285
286static inline void ppc_data_cache_block_invalidate_2(
287 void *base,
288 uintptr_t offset
289)
290{
291 __asm__ volatile (
292 "dcbi %0, %1"
293 :
294 : "b" (base), "r" (offset)
295 : "memory"
296 );
297}
298
299static inline void ppc_data_cache_block_store(const void *addr)
300{
301 __asm__ volatile (
302 "dcbst 0, %0"
303 :
304 : "r" (addr)
305 );
306}
307
308static inline void ppc_data_cache_block_store_2(
309 const void *base,
310 uintptr_t offset
311)
312{
313 __asm__ volatile (
314 "dcbst %0, %1"
315 :
316 : "b" (base), "r" (offset)
317 );
318}
319
320static inline void ppc_data_cache_block_touch(const void *addr)
321{
322 __asm__ volatile (
323 "dcbt 0, %0"
324 :
325 : "r" (addr)
326 );
327}
328
329static inline void ppc_data_cache_block_touch_2(
330 const void *base,
331 uintptr_t offset
332)
333{
334 __asm__ volatile (
335 "dcbt %0, %1"
336 :
337 : "b" (base), "r" (offset)
338 );
339}
340
341static inline void ppc_data_cache_block_touch_for_store(const void *addr)
342{
343 __asm__ volatile (
344 "dcbtst 0, %0"
345 :
346 : "r" (addr)
347 );
348}
349
350static inline void ppc_data_cache_block_touch_for_store_2(
351 const void *base,
352 uintptr_t offset
353)
354{
355 __asm__ volatile (
356 "dcbtst %0, %1"
357 :
358 : "b" (base), "r" (offset)
359 );
360}
361
362static inline void ppc_data_cache_block_clear_to_zero(void *addr)
363{
364 __asm__ volatile (
365 "dcbz 0, %0"
366 :
367 : "r" (addr)
368 : "memory"
369 );
370}
371
372static inline void ppc_data_cache_block_clear_to_zero_2(
373 void *base,
374 uintptr_t offset
375)
376{
377 __asm__ volatile (
378 "dcbz %0, %1"
379 :
380 : "b" (base), "r" (offset)
381 : "memory"
382 );
383}
384
385static inline void ppc_instruction_cache_block_invalidate(void *addr)
386{
387 __asm__ volatile (
388 "icbi 0, %0"
389 :
390 : "r" (addr)
391 );
392}
393
394static inline void ppc_instruction_cache_block_invalidate_2(
395 void *base,
396 uintptr_t offset
397)
398{
399 __asm__ volatile (
400 "icbi %0, %1"
401 :
402 : "b" (base), "r" (offset)
403 );
404}
405
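Each of the cache block helpers above (dcbf, dcbi, dcbst, dcbt, dcbtst, dcbz, icbi) affects only the single cache block containing the given effective address. To cover a whole buffer, a driver steps through it in cache-line increments and finishes with a sync; a sketch under the assumption of a 32-byte line size (the real size is CPU dependent and not defined by this header):

#include <libcpu/powerpc-utility.h>

#define ASSUMED_CACHE_LINE_SIZE 32

static void flush_buffer_before_dma(const void *buf, size_t n)
{
  uintptr_t line = (uintptr_t) buf & ~(uintptr_t) (ASSUMED_CACHE_LINE_SIZE - 1);
  uintptr_t end = (uintptr_t) buf + n;

  while (line < end) {
    /* Write the cache block holding this line back to memory */
    ppc_data_cache_block_flush((void *) line);
    line += ASSUMED_CACHE_LINE_SIZE;
  }

  /* Make sure all flushes completed before the device starts reading */
  ppc_synchronize_data();
}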
412static inline uint32_t ppc_external_exceptions_enable(void)
413{
414 uint32_t current_msr;
415 uint32_t new_msr;
416
417 RTEMS_COMPILER_MEMORY_BARRIER();
418
419 __asm__ volatile (
420 "mfmsr %0;"
421 "ori %1, %0, 0x8000;"
422 "mtmsr %1"
423 : "=r" (current_msr), "=r" (new_msr)
424 );
425
426 return current_msr;
427}
428
434static inline void ppc_external_exceptions_disable(uint32_t msr)
435{
436 ppc_set_machine_state_register(msr);
437
438 RTEMS_COMPILER_MEMORY_BARRIER();
439}
440
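ppc_external_exceptions_enable() sets MSR[EE] (the 0x8000 bit) and returns the previous MSR value; handing that value back to ppc_external_exceptions_disable() restores the original state, so the pair nests cleanly. A sketch of the usual save/restore pattern:

/* Run a code section with external exceptions enabled, then restore. */
uint32_t saved_msr = ppc_external_exceptions_enable();

/* ... work that must run with external exceptions enabled ... */

ppc_external_exceptions_disable(saved_msr);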
441static inline uint32_t ppc_count_leading_zeros(uint32_t value)
442{
443 uint32_t count;
444
445 __asm__ (
446 "cntlzw %0, %1;"
447 : "=r" (count)
448 : "r" (value)
449 );
450
451 return count;
452}
453
454/*
455 * Simple spin delay in microsecond units for device drivers.
456 * This is very dependent on the clock speed of the target.
457 */
458
459#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
460/* Wonderful bookE doesn't have mftb/mftbu; they only
461 * define the TBRU/TBRL SPRs so we use these. Luckily,
462 * we run in supervisory mode so that should work on
463 * all CPUs. In user mode we'd have a problem...
464 * 2007/11/30, T.S.
465 *
466 * OTOH, PSIM currently lacks support for reading
467 * SPRs 268/269. You need GDB patch sim/2376 to avoid
468 * a crash...
469 * OTOH, the MPC8xx does not allow reading the time base registers via mfspr;
470 * we NEED an mftb to access the time base.
471 * 2009/10/30 Th. D.
472 */
473#define CPU_Get_timebase_low( _value ) \
474 __asm__ volatile( "mftb %0" : "=r" (_value) )
475#else
476#define CPU_Get_timebase_low( _value ) \
477 __asm__ volatile( "mfspr %0,268" : "=r" (_value) )
478#endif
479
480/* Must be provided for rtems_bsp_delay to work */
481extern uint32_t bsp_clicks_per_usec;
482
483#define rtems_bsp_delay( _microseconds ) \
484 do { \
485 uint32_t start, ticks, now; \
486 CPU_Get_timebase_low( start ) ; \
487 ticks = (_microseconds) * bsp_clicks_per_usec; \
488 do \
489 CPU_Get_timebase_low( now ) ; \
490 while (now - start < ticks); \
491 } while (0)
492
493#define rtems_bsp_delay_in_bus_cycles( _cycles ) \
494 do { \
495 uint32_t start, now; \
496 CPU_Get_timebase_low( start ); \
497 do \
498 CPU_Get_timebase_low( now ); \
499 while (now - start < (_cycles)); \
500 } while (0)
501
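Both delay macros busy-wait on the time base, so they consume CPU time for the whole interval and are only meant for short waits in device drivers; the microsecond variant additionally requires the BSP to have set bsp_clicks_per_usec. A usage sketch with an arbitrary 10 microsecond hold time and an assumed control register layout:

static void reset_device(volatile uint8_t *ctrl)
{
  ppc_write_byte(0x01, ctrl);   /* assert reset (assumed bit) */
  rtems_bsp_delay(10);          /* hold reset for 10 microseconds */
  ppc_write_byte(0x00, ctrl);   /* release reset */
}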
502/*
503 * Routines to access the decrementer register
504 */
505
506#define PPC_Set_decrementer( _clicks ) \
507 do { \
508 __asm__ volatile( "mtdec %0" : : "r" ((_clicks)) ); \
509 } while (0)
510
511#define PPC_Get_decrementer( _clicks ) \
512 __asm__ volatile( "mfdec %0" : "=r" (_clicks) )
513
514/*
515 * Routines to access the time base register
516 */
517
518static inline uint64_t PPC_Get_timebase_register( void )
519{
520 uint32_t tbr_low;
521 uint32_t tbr_high;
522 uint32_t tbr_high_old;
523 uint64_t tbr;
524
525 do {
526#if defined(mpx8xx) || defined(mpc860) || defined(mpc821)
527/* See comment above (CPU_Get_timebase_low) */
528 __asm__ volatile( "mftbu %0" : "=r" (tbr_high_old));
529 __asm__ volatile( "mftb %0" : "=r" (tbr_low));
530 __asm__ volatile( "mftbu %0" : "=r" (tbr_high));
531#else
532 __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high_old));
533 __asm__ volatile( "mfspr %0, 268" : "=r" (tbr_low));
534 __asm__ volatile( "mfspr %0, 269" : "=r" (tbr_high));
535#endif
536 } while ( tbr_high_old != tbr_high );
537
538 tbr = tbr_high;
539 tbr <<= 32;
540 tbr |= tbr_low;
541 return tbr;
542}
543
544static inline void PPC_Set_timebase_register (uint64_t tbr)
545{
546 uint32_t tbr_low;
547 uint32_t tbr_high;
548
549 tbr_low = (uint32_t) tbr;
550 tbr_high = (uint32_t) (tbr >> 32);
551 __asm__ volatile( "mtspr 284, %0" : : "r" (tbr_low));
552 __asm__ volatile( "mtspr 285, %0" : : "r" (tbr_high));
553
554}
555
556static inline uint32_t ppc_decrementer_register(void)
557{
558 uint32_t dec;
559
560 PPC_Get_decrementer(dec);
561
562 return dec;
563}
564
565static inline void ppc_set_decrementer_register(uint32_t dec)
566{
567 PPC_Set_decrementer(dec);
568}
569
573#define PPC_STRINGOF(x) #x
574
580#define PPC_SPECIAL_PURPOSE_REGISTER(spr, val) \
581 __asm__ volatile (\
582 "mfspr %0, " PPC_STRINGOF(spr) \
583 : "=r" (val) \
584 )
585
590#define PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val) \
591 do { \
592 __asm__ volatile (\
593 "mtspr " PPC_STRINGOF(spr) ", %0" \
594 : \
595 : "r" (val) \
596 ); \
597 } while (0)
598
605#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
606 do { \
607 ISR_Level level; \
608 uint32_t val; \
609 uint32_t mybits = bits; \
610 _ISR_Local_disable(level); \
611 PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
612 val |= mybits; \
613 PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
614 _ISR_Local_enable(level); \
615 } while (0)
616
624#define PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS_MASKED(spr, bits, mask) \
625 do { \
626 ISR_Level level; \
627 uint32_t val; \
628 uint32_t mybits = bits; \
629 uint32_t mymask = mask; \
630 _ISR_Local_disable(level); \
631 PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
632 val &= ~mymask; \
633 val |= mybits; \
634 PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
635 _ISR_Local_enable(level); \
636 } while (0)
637
644#define PPC_CLEAR_SPECIAL_PURPOSE_REGISTER_BITS(spr, bits) \
645 do { \
646 ISR_Level level; \
647 uint32_t val; \
648 uint32_t mybits = bits; \
649 _ISR_Local_disable(level); \
650 PPC_SPECIAL_PURPOSE_REGISTER(spr, val); \
651 val &= ~mybits; \
652 PPC_SET_SPECIAL_PURPOSE_REGISTER(spr, val); \
653 _ISR_Local_enable(level); \
654 } while (0)
655
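Since PPC_STRINGOF() pastes its argument into the instruction text, the spr argument must expand to a compile-time SPR number (a register name macro such as those from <rtems/powerpc/registers.h>, or a plain integer literal). The *_BITS variants perform their read-modify-write inside an _ISR_Local_disable()/_ISR_Local_enable() section so a concurrent update on the same CPU cannot be lost. A sketch with a hypothetical SPR number:

#define MY_SPR 256  /* hypothetical SPR number, for illustration only */

static uint32_t read_my_spr(void)
{
  uint32_t val;

  PPC_SPECIAL_PURPOSE_REGISTER(MY_SPR, val);
  return val;
}

static void set_my_spr_flag(void)
{
  /* Set bit 0x1 without losing other bits to a concurrent interrupt handler */
  PPC_SET_SPECIAL_PURPOSE_REGISTER_BITS(MY_SPR, 0x1);
}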
661#define PPC_THREAD_MGMT_REGISTER(tmr) \
662 ({ \
663 uint32_t val; \
664 __asm__ volatile (\
665 "mftmr %0, " PPC_STRINGOF(tmr) \
666 : "=r" (val) \
667 ); \
668 val;\
669 } )
670
675#define PPC_SET_THREAD_MGMT_REGISTER(tmr, val) \
676 do { \
677 __asm__ volatile (\
678 "mttmr " PPC_STRINGOF(tmr) ", %0" \
679 : \
680 : "r" (val) \
681 ); \
682 } while (0)
683
691#define PPC_DEVICE_CONTROL_REGISTER(dcr) \
692 ({ \
693 uint32_t val; \
694 __asm__ volatile (\
695 "mfdcr %0, " PPC_STRINGOF(dcr) \
696 : "=r" (val) \
697 ); \
698 val;\
699 } )
700
707#define PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val) \
708 do { \
709 __asm__ volatile (\
710 "mtdcr " PPC_STRINGOF(dcr) ", %0" \
711 : \
712 : "r" (val) \
713 ); \
714 } while (0)
715
722#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
723 do { \
724 ISR_Level level; \
725 uint32_t val; \
726 uint32_t mybits = bits; \
727 _ISR_Local_disable(level); \
728 val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
729 val |= mybits; \
730 PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
731 _ISR_Local_enable(level); \
732 } while (0)
733
741#define PPC_SET_DEVICE_CONTROL_REGISTER_BITS_MASKED(dcr, bits, mask) \
742 do { \
743 ISR_Level level; \
744 uint32_t val; \
745 uint32_t mybits = bits; \
746 uint32_t mymask = mask; \
747 _ISR_Local_disable(level); \
748 val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
749 val &= ~mymask; \
750 val |= mybits; \
751 PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
752 _ISR_Local_enable(level); \
753 } while (0)
754
761#define PPC_CLEAR_DEVICE_CONTROL_REGISTER_BITS(dcr, bits) \
762 do { \
763 ISR_Level level; \
764 uint32_t val; \
765 uint32_t mybits = bits; \
766 _ISR_Local_disable(level); \
767 val = PPC_DEVICE_CONTROL_REGISTER(dcr); \
768 val &= ~mybits; \
769 PPC_SET_DEVICE_CONTROL_REGISTER(dcr, val); \
770 _ISR_Local_enable(level); \
771 } while (0)
772
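The DCR accessors mirror the SPR accessors for the Device Control Registers of PPC 4xx style cores: the dcr argument is stringified into the mfdcr/mtdcr instruction, so it must expand to a constant DCR number, and the *_BITS forms do their read-modify-write with interrupts disabled. A sketch with a hypothetical DCR number and bit assignment:

#define MY_DCR 0x80  /* hypothetical DCR number, for illustration only */

static uint32_t controller_status(void)
{
  return PPC_DEVICE_CONTROL_REGISTER(MY_DCR);
}

static void controller_enable(void)
{
  /* Set an assumed enable bit while leaving the other bits untouched */
  PPC_SET_DEVICE_CONTROL_REGISTER_BITS(MY_DCR, 0x80000000U);
}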
773static inline uint32_t ppc_time_base(void)
774{
775 uint32_t val;
776
777 CPU_Get_timebase_low(val);
778
779 return val;
780}
781
782static inline void ppc_set_time_base(uint32_t val)
783{
784 PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWL, val);
785}
786
787static inline uint32_t ppc_time_base_upper(void)
788{
789 uint32_t val;
790 PPC_SPECIAL_PURPOSE_REGISTER(TBRU, val);
791 return val;
792}
793
794static inline void ppc_set_time_base_upper(uint32_t val)
795{
796 PPC_SET_SPECIAL_PURPOSE_REGISTER(TBWU, val);
797}
798
799static inline uint64_t ppc_time_base_64(void)
800{
801 return PPC_Get_timebase_register();
802}
803
804static inline void ppc_set_time_base_64(uint64_t val)
805{
806 PPC_Set_timebase_register(val);
807}
808
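ppc_time_base_64() returns a monotonically increasing 64-bit tick count; the high/low/high read loop in PPC_Get_timebase_register() guards against the low word wrapping between the two reads of the upper word, so intervals can be measured by simple subtraction:

/* Sketch: measure how many time base ticks a piece of work takes */
static uint64_t measure_ticks(void (*work)(void))
{
  uint64_t start = ppc_time_base_64();

  (*work)();

  return ppc_time_base_64() - start;
}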
809static inline uint32_t ppc_alternate_time_base(void)
810{
811 uint32_t val;
812 PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBL, val);
813 return val;
814}
815
816static inline uint32_t ppc_alternate_time_base_upper(void)
817{
818 uint32_t val;
819 PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_ATBU, val);
820 return val;
821}
822
823static inline uint64_t ppc_alternate_time_base_64(void)
824{
825 uint32_t atbl;
826 uint32_t atbu_0;
827 uint32_t atbu_1;
828
829 do {
830 atbu_0 = ppc_alternate_time_base_upper();
831 atbl = ppc_alternate_time_base();
832 atbu_1 = ppc_alternate_time_base_upper();
833 } while (atbu_0 != atbu_1);
834
835 return (((uint64_t) atbu_1) << 32) | ((uint64_t) atbl);
836}
837
838static inline uint32_t ppc_processor_id(void)
839{
840 uint32_t val;
841 PPC_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
842 return val;
843}
844
845static inline void ppc_set_processor_id(uint32_t val)
846{
847 PPC_SET_SPECIAL_PURPOSE_REGISTER(BOOKE_PIR, val);
848}
849
850static inline uint32_t ppc_fsl_system_version(void)
851{
852 uint32_t val;
853 PPC_SPECIAL_PURPOSE_REGISTER(FSL_EIS_SVR, val);
854 return val;
855}
856
857static inline uint32_t ppc_fsl_system_version_cid(uint32_t svr)
858{
859 return (svr >> 28) & 0xf;
860}
861
862static inline uint32_t ppc_fsl_system_version_sid(uint32_t svr)
863{
864 return (svr >> 16) & 0xfff;
865}
866
867static inline uint32_t ppc_fsl_system_version_proc(uint32_t svr)
868{
869 return (svr >> 12) & 0xf;
870}
871
872static inline uint32_t ppc_fsl_system_version_mfg(uint32_t svr)
873{
874 return (svr >> 8) & 0xf;
875}
876
877static inline uint32_t ppc_fsl_system_version_mjrev(uint32_t svr)
878{
879 return (svr >> 4) & 0xf;
880}
881
882static inline uint32_t ppc_fsl_system_version_mnrev(uint32_t svr)
883{
884 return (svr >> 0) & 0xf;
885}
886
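The ppc_fsl_system_version_* helpers split the Freescale System Version Register (SVR) into its sub-fields. A small sketch that prints them through printk(), which is available via the <rtems/bspIo.h> include above:

static void print_fsl_system_version(void)
{
  uint32_t svr = ppc_fsl_system_version();

  printk(
    "SVR 0x%08x: cid %u, sid 0x%x, proc %u, mfg %u, rev %u.%u\n",
    (unsigned) svr,
    (unsigned) ppc_fsl_system_version_cid(svr),
    (unsigned) ppc_fsl_system_version_sid(svr),
    (unsigned) ppc_fsl_system_version_proc(svr),
    (unsigned) ppc_fsl_system_version_mfg(svr),
    (unsigned) ppc_fsl_system_version_mjrev(svr),
    (unsigned) ppc_fsl_system_version_mnrev(svr)
  );
}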
887static inline void ppc_msync(void)
888{
889 __asm__ volatile (
890 ".machine push\n"
891 ".machine e500\n"
892 "msync\n"
893 ".machine pop"
894 :
895 :
896 : "memory"
897 );
898}
899
900static inline void ppc_tlbre(void)
901{
902 __asm__ volatile (
903 ".machine push\n"
904 ".machine e500\n"
905 "tlbre\n"
906 ".machine pop"
907 :
908 :
909 : "memory"
910 );
911}
912
913static inline void ppc_tlbwe(void)
914{
915 __asm__ volatile (
916 ".machine push\n"
917 ".machine e500\n"
918 "tlbwe\n"
919 ".machine pop"
920 :
921 :
922 : "memory"
923 );
924}
925
926static inline void ppc_tlbsx(void *addr)
927{
928 __asm__ volatile (
929 ".machine push\n"
930 ".machine e500\n"
931 "tlbsx 0, %0\n"
932 ".machine pop"
933 :
934 : "r" (addr)
935 : "memory"
936 );
937}
938
939static inline void ppc_mtivpr(void *prefix)
940{
941 __asm__ volatile (
942 ".machine push\n"
943 ".machine e500\n"
944 "mtivpr %0\n"
945 ".machine pop"
946 :
947 : "r" (prefix)
948 );
949}
950
951#define ppc_mtivor(x, vec) __asm__ volatile ( \
952 ".machine push\n" \
953 ".machine e500\n" \
954 "mtivor" RTEMS_XSTRING(x) " %0\n" \
955 ".machine pop" \
956 : \
957 : "r" (vec) \
958 )
959
960void ppc_code_copy(void *dest, const void *src, size_t n);
961
962/* FIXME: Do not use this function */
963void printBAT(int bat, uint32_t upper, uint32_t lower);
964
965/* FIXME: Do not use this function */
966void ShowBATS(void);
967
968#endif /* ifndef ASM */
969
970#if defined(ASM)
971#include <rtems/asm.h>
972
973.macro LA reg, addr
974#if defined(__powerpc64__)
975 lis \reg, (\addr)@highest
976 ori \reg, \reg, (\addr)@higher
977 rldicr \reg, \reg, 32, 31
978 oris \reg, \reg, (\addr)@h
979 ori \reg, \reg, (\addr)@l
980#else
981 lis \reg, (\addr)@h
982 ori \reg, \reg, (\addr)@l
983#endif
984.endm
985
986.macro LA32 reg, addr
987 lis \reg, (\addr)@h
988 ori \reg, \reg, (\addr)@l
989.endm
990
991.macro LWI reg, value
992 lis \reg, (\value)@h
993 ori \reg, \reg, (\value)@l
994.endm
995
996.macro LW reg, addr
997 lis \reg, \addr@ha
998 lwz \reg, \addr@l(\reg)
999.endm
1000
1001/*
1002 * Tests the bits in reg1 against the bits set in mask. A match is indicated
1003 * by EQ = 1 in CR0. A mismatch is indicated by EQ = 0 in CR0. The register
1004 * reg2 is used to load the mask.
1005 */
1006.macro TSTBITS reg1, reg2, mask
1007 LWI \reg2, \mask
1008 and \reg1, \reg1, \reg2
1009 cmplw \reg1, \reg2
1010.endm
1011
1012.macro SETBITS reg1, reg2, mask
1013 LWI \reg2, \mask
1014 or \reg1, \reg1, \reg2
1015.endm
1016
1017.macro CLRBITS reg1, reg2, mask
1018 LWI \reg2, \mask
1019 andc \reg1, \reg1, \reg2
1020.endm
1021
1022.macro GLOBAL_FUNCTION name
1023 .global \name
1024 .type \name, @function
1025\name:
1026.endm
1027
1028/*
1029 * Obtain interrupt mask
1030 */
1031.macro GET_INTERRUPT_MASK mask
1032 lis \mask, _PPC_INTERRUPT_DISABLE_MASK@h
1033 ori \mask, \mask, _PPC_INTERRUPT_DISABLE_MASK@l
1034.endm
1035
1036/*
1037 * Disables all asynchronous exceptions (interrupts) which may cause a context
1038 * switch.
1039 */
1040.macro INTERRUPT_DISABLE level, mask
1041 mfmsr \level
1042 GET_INTERRUPT_MASK mask=\mask
1043 andc \mask, \level, \mask
1044 mtmsr \mask
1045.endm
1046
1047/*
1048 * Restore previous machine state.
1049 */
1050.macro INTERRUPT_ENABLE level
1051 mtmsr \level
1052.endm
1053
1054.macro SET_SELF_CPU_CONTROL reg_0, reg_1
1055#if defined(RTEMS_SMP)
1056 /* Use Book E Processor ID Register (PIR) */
1057 mfspr \reg_0, 286
1058 slwi \reg_0, \reg_0, PER_CPU_CONTROL_SIZE_LOG2
1059#if defined(__powerpc64__)
1060 LA \reg_1, _Per_CPU_Information
1061 add \reg_0, \reg_0, \reg_1
1062#else
1063 addis \reg_0, \reg_0, _Per_CPU_Information@ha
1064 addi \reg_0, \reg_0, _Per_CPU_Information@l
1065#endif
1066 mtspr PPC_PER_CPU_CONTROL_REGISTER, \reg_0
1067#endif
1068.endm
1069
1070.macro GET_SELF_CPU_CONTROL reg
1071#if defined(RTEMS_SMP)
1072 mfspr \reg, PPC_PER_CPU_CONTROL_REGISTER
1073#else
1074 lis \reg, _Per_CPU_Information@h
1075 ori \reg, \reg, _Per_CPU_Information@l
1076#endif
1077.endm
1078
1079.macro SHIFT_RIGHT_IMMEDIATE rd, rs, imm
1080#if defined(__powerpc64__)
1081 srdi \rd, \rs, \imm
1082#else
1083 srwi \rd, \rs, \imm
1084#endif
1085.endm
1086
1087.macro COMPARE_LOGICAL cr, ra, rb
1088#if defined(__powerpc64__)
1089 cmpld \cr, \ra, \rb
1090#else
1091 cmplw \cr, \ra, \rb
1092#endif
1093.endm
1094
1095.macro CLEAR_RIGHT_IMMEDIATE rd, rs, imm
1096#if defined(__powerpc64__)
1097 clrrdi \rd, \rs, \imm
1098#else
1099 clrrwi \rd, \rs, \imm
1100#endif
1101.endm
1102
1103#define LINKER_SYMBOL(sym) .extern sym
1104
1105#endif /* ASM */
1106
1107#ifdef __cplusplus
1108}
1109#endif
1110
1113#endif /* __LIBCPU_POWERPC_UTILITY_H */