RTEMS 4.11 Annotated Report
Thu Dec 20 20:56:53 2012
0000f89c <_CORE_message_queue_Initialize>:
CORE_message_queue_Control *the_message_queue,
CORE_message_queue_Attributes *the_message_queue_attributes,
uint32_t maximum_pending_messages,
size_t maximum_message_size
)
{
f89c: e92d40f0 push {r4, r5, r6, r7, lr}
/*
* Check if allocated_message_size is aligned to a uintptr-sized boundary.
* If not, round allocated_message_size up to a multiple of the pointer
* size.
*/
if (allocated_message_size & (sizeof(uintptr_t) - 1)) {
f8a0: e3130003 tst r3, #3
CORE_message_queue_Control *the_message_queue,
CORE_message_queue_Attributes *the_message_queue_attributes,
uint32_t maximum_pending_messages,
size_t maximum_message_size
)
{
f8a4: e1a04000 mov r4, r0
size_t message_buffering_required = 0;
size_t allocated_message_size;
the_message_queue->maximum_pending_messages = maximum_pending_messages;
the_message_queue->number_of_pending_messages = 0;
f8a8: e3a00000 mov r0, #0
CORE_message_queue_Control *the_message_queue,
CORE_message_queue_Attributes *the_message_queue_attributes,
uint32_t maximum_pending_messages,
size_t maximum_message_size
)
{
f8ac: e1a05002 mov r5, r2
f8b0: e1a07001 mov r7, r1
size_t message_buffering_required = 0;
size_t allocated_message_size;
the_message_queue->maximum_pending_messages = maximum_pending_messages;
f8b4: e5842044 str r2, [r4, #68] ; 0x44
the_message_queue->number_of_pending_messages = 0;
f8b8: e5840048 str r0, [r4, #72] ; 0x48
the_message_queue->maximum_message_size = maximum_message_size;
f8bc: e584304c str r3, [r4, #76] ; 0x4c
/*
* Check if allocated_message_size is aligned to a uintptr-sized boundary.
* If not, round allocated_message_size up to a multiple of the pointer
* size.
*/
if (allocated_message_size & (sizeof(uintptr_t) - 1)) {
f8c0: 01a06003 moveq r6, r3
f8c4: 0a000003 beq f8d8 <_CORE_message_queue_Initialize+0x3c>
allocated_message_size += sizeof(uintptr_t);
f8c8: e2836004 add r6, r3, #4
allocated_message_size &= ~(sizeof(uintptr_t) - 1);
f8cc: e3c66003 bic r6, r6, #3
/*
* Check for an overflow. It can occur while rounding allocated_message_size
* up to a multiple of uintptr_t above.
*/
if (allocated_message_size < maximum_message_size)
f8d0: e1530006 cmp r3, r6
f8d4: 88bd80f0 pophi {r4, r5, r6, r7, pc}
/*
* Calculate how much total memory is required for message buffering and
* check for overflow on the multiplication.
*/
if ( !size_t_mult32_with_overflow(
f8d8: e2866010 add r6, r6, #16
size_t a,
size_t b,
size_t *c
)
{
long long x = (long long)a*b;
f8dc: e0810695 umull r0, r1, r5, r6
if ( x > SIZE_MAX )
f8e0: e3e02000 mvn r2, #0
f8e4: e3a03000 mov r3, #0
f8e8: e1520000 cmp r2, r0
f8ec: e0d3c001 sbcs ip, r3, r1
*/
if ( !size_t_mult32_with_overflow(
(size_t) maximum_pending_messages,
allocated_message_size + sizeof(CORE_message_queue_Buffer_control),
&message_buffering_required ) )
return false;
f8f0: b3a00000 movlt r0, #0
size_t *c
)
{
long long x = (long long)a*b;
if ( x > SIZE_MAX )
f8f4: b8bd80f0 poplt {r4, r5, r6, r7, pc}
/*
* Attempt to allocate the message memory
*/
the_message_queue->message_buffers = (CORE_message_queue_Buffer *)
_Workspace_Allocate( message_buffering_required );
f8f8: eb000bd0 bl 12840 <_Workspace_Allocate>
if (the_message_queue->message_buffers == 0)
f8fc: e3500000 cmp r0, #0
/*
* Attempt to allocate the message memory
*/
the_message_queue->message_buffers = (CORE_message_queue_Buffer *)
_Workspace_Allocate( message_buffering_required );
f900: e1a01000 mov r1, r0
return false;
/*
* Attempt to allocate the message memory
*/
the_message_queue->message_buffers = (CORE_message_queue_Buffer *)
f904: e584005c str r0, [r4, #92] ; 0x5c
_Workspace_Allocate( message_buffering_required );
if (the_message_queue->message_buffers == 0)
f908: 0a000013 beq f95c <_CORE_message_queue_Initialize+0xc0>
/*
* Initialize the pool of inactive messages, pending messages,
* and set of waiting threads.
*/
_Chain_Initialize (
f90c: e2840060 add r0, r4, #96 ; 0x60
f910: e1a02005 mov r2, r5
f914: e1a03006 mov r3, r6
f918: ebffffc6 bl f838 <_Chain_Initialize>
allocated_message_size + sizeof( CORE_message_queue_Buffer_control )
);
_Chain_Initialize_empty( &the_message_queue->Pending_messages );
_Thread_queue_Initialize(
f91c: e5971000 ldr r1, [r7]
RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(
Chain_Control *the_chain
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
f920: e2843050 add r3, r4, #80 ; 0x50
f924: e2842054 add r2, r4, #84 ; 0x54
head->next = tail;
head->previous = NULL;
tail->previous = head;
f928: e5843058 str r3, [r4, #88] ; 0x58
f92c: e2413001 sub r3, r1, #1
f930: e2731000 rsbs r1, r3, #0
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
f934: e5842050 str r2, [r4, #80] ; 0x50
head->previous = NULL;
f938: e3a02000 mov r2, #0
f93c: e5842054 str r2, [r4, #84] ; 0x54
f940: e1a00004 mov r0, r4
f944: e0a11003 adc r1, r1, r3
f948: e3a02080 mov r2, #128 ; 0x80
f94c: e3a03006 mov r3, #6
f950: eb000982 bl 11f60 <_Thread_queue_Initialize>
THREAD_QUEUE_DISCIPLINE_PRIORITY : THREAD_QUEUE_DISCIPLINE_FIFO,
STATES_WAITING_FOR_MESSAGE,
CORE_MESSAGE_QUEUE_STATUS_TIMEOUT
);
return true;
f954: e3a00001 mov r0, #1
f958: e8bd80f0 pop {r4, r5, r6, r7, pc}
}
f95c: e8bd80f0 pop {r4, r5, r6, r7, pc} <== NOT EXECUTED
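The listing above computes the per-message allocation size by rounding the maximum message size up to a pointer-sized multiple and then multiplying by the message count with an overflow check (size_t_mult32_with_overflow). A minimal, self-contained C sketch of that calculation follows; it is not part of the generated report, and the names compute_buffer_space and overhead are illustrative stand-ins, with overhead playing the role of sizeof(CORE_message_queue_Buffer_control).

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool compute_buffer_space(
  size_t maximum_message_size,
  size_t maximum_pending_messages,
  size_t overhead,   /* stands in for sizeof(CORE_message_queue_Buffer_control) */
  size_t *total
)
{
  size_t allocated_message_size = maximum_message_size;

  /* Round up to the next multiple of the pointer size. */
  if (allocated_message_size & (sizeof(uintptr_t) - 1)) {
    allocated_message_size += sizeof(uintptr_t);
    allocated_message_size &= ~(sizeof(uintptr_t) - 1);

    /* The round-up itself may wrap around. */
    if (allocated_message_size < maximum_message_size)
      return false;
  }

  /* Per-message footprint: payload plus buffer-control overhead. */
  size_t per_message = allocated_message_size + overhead;
  if (per_message < allocated_message_size)
    return false;

  /* Overflow-checked multiplication, like size_t_mult32_with_overflow(). */
  if (maximum_pending_messages != 0 &&
      per_message > SIZE_MAX / maximum_pending_messages)
    return false;

  *total = per_message * maximum_pending_messages;
  return true;
}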
0000baac <_Heap_Allocate_aligned_with_boundary>:
Heap_Control *heap,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{
baac: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
bab0: e1a08002 mov r8, r2
Heap_Statistics *const stats = &heap->stats;
uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
- HEAP_ALLOC_BONUS;
uintptr_t const page_size = heap->page_size;
bab4: e5902010 ldr r2, [r0, #16]
Heap_Control *heap,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{
bab8: e24dd01c sub sp, sp, #28
babc: e1a06001 mov r6, r1
Heap_Block *block = NULL;
uintptr_t alloc_begin = 0;
uint32_t search_count = 0;
bool search_again = false;
if ( block_size_floor < alloc_size ) {
bac0: e2911004 adds r1, r1, #4
Heap_Control *heap,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{
bac4: e1a07000 mov r7, r0
Heap_Block *block = NULL;
uintptr_t alloc_begin = 0;
uint32_t search_count = 0;
bool search_again = false;
if ( block_size_floor < alloc_size ) {
bac8: e58d1000 str r1, [sp]
Heap_Control *heap,
uintptr_t alloc_size,
uintptr_t alignment,
uintptr_t boundary
)
{
bacc: e1a0b003 mov fp, r3
Heap_Statistics *const stats = &heap->stats;
uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
- HEAP_ALLOC_BONUS;
uintptr_t const page_size = heap->page_size;
bad0: e58d200c str r2, [sp, #12]
Heap_Block *block = NULL;
uintptr_t alloc_begin = 0;
uint32_t search_count = 0;
bool search_again = false;
if ( block_size_floor < alloc_size ) {
bad4: 2a000086 bcs bcf4 <_Heap_Allocate_aligned_with_boundary+0x248>
/* Integer overflow occurred */
return NULL;
}
if ( boundary != 0 ) {
bad8: e3530000 cmp r3, #0
badc: 1a000078 bne bcc4 <_Heap_Allocate_aligned_with_boundary+0x218>
return &heap->free_list;
}
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
{
return _Heap_Free_list_head(heap)->next;
bae0: e597a008 ldr sl, [r7, #8]
do {
Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
block = _Heap_Free_list_first( heap );
while ( block != free_list_tail ) {
bae4: e157000a cmp r7, sl
bae8: e3a05000 mov r5, #0
baec: 0a00007b beq bce0 <_Heap_Allocate_aligned_with_boundary+0x234>
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
baf0: e59d300c ldr r3, [sp, #12]
+ HEAP_BLOCK_HEADER_SIZE + page_size - 1;
uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
baf4: e2662004 rsb r2, r6, #4
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
baf8: e2833007 add r3, r3, #7
bafc: e58d3010 str r3, [sp, #16]
+ HEAP_BLOCK_HEADER_SIZE + page_size - 1;
uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
bb00: e58d2014 str r2, [sp, #20]
bb04: ea000005 b bb20 <_Heap_Allocate_aligned_with_boundary+0x74>
}
/* Statistics */
++search_count;
if ( alloc_begin != 0 ) {
bb08: e3540000 cmp r4, #0
);
}
}
/* Statistics */
++search_count;
bb0c: e2855001 add r5, r5, #1
if ( alloc_begin != 0 ) {
bb10: 1a00005a bne bc80 <_Heap_Allocate_aligned_with_boundary+0x1d4>
break;
}
block = block->next;
bb14: e59aa008 ldr sl, [sl, #8]
do {
Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
block = _Heap_Free_list_first( heap );
while ( block != free_list_tail ) {
bb18: e157000a cmp r7, sl
bb1c: 0a00006f beq bce0 <_Heap_Allocate_aligned_with_boundary+0x234>
/*
* The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
* field. Thus the value is about one unit larger than the real block
* size. The greater than operator takes this into account.
*/
if ( block->size_and_flag > block_size_floor ) {
bb20: e59a9004 ldr r9, [sl, #4]
bb24: e59d3000 ldr r3, [sp]
bb28: e1530009 cmp r3, r9
);
}
}
/* Statistics */
++search_count;
bb2c: 22855001 addcs r5, r5, #1
/*
* The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
* field. Thus the value is about one unit larger than the real block
* size. The greater than operator takes this into account.
*/
if ( block->size_and_flag > block_size_floor ) {
bb30: 2afffff7 bcs bb14 <_Heap_Allocate_aligned_with_boundary+0x68>
if ( alignment == 0 ) {
bb34: e3580000 cmp r8, #0
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
const Heap_Block *block
)
{
return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE;
bb38: 028a4008 addeq r4, sl, #8
bb3c: 0afffff1 beq bb08 <_Heap_Allocate_aligned_with_boundary+0x5c>
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
+ HEAP_BLOCK_HEADER_SIZE + page_size - 1;
uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
uintptr_t alloc_begin = alloc_end - alloc_size;
bb40: e59d3014 ldr r3, [sp, #20]
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
bb44: e3c99001 bic r9, r9, #1
uintptr_t alignment,
uintptr_t boundary
)
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
bb48: e5972014 ldr r2, [r7, #20]
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
bb4c: e08a9009 add r9, sl, r9
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
+ HEAP_BLOCK_HEADER_SIZE + page_size - 1;
bb50: e59d1010 ldr r1, [sp, #16]
uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
uintptr_t alloc_begin = alloc_end - alloc_size;
bb54: e0834009 add r4, r3, r9
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
uintptr_t value,
uintptr_t alignment
)
{
return value - (value % alignment);
bb58: e1a00004 mov r0, r4
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
+ HEAP_BLOCK_HEADER_SIZE + page_size - 1;
bb5c: e0623001 rsb r3, r2, r1
bb60: e1a01008 mov r1, r8
uintptr_t alignment,
uintptr_t boundary
)
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
bb64: e58d2004 str r2, [sp, #4]
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
uintptr_t const block_end = block_begin + block_size;
uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
uintptr_t const alloc_begin_ceiling = block_end - min_block_size
bb68: e0839009 add r9, r3, r9
bb6c: eb002b54 bl 168c4 <__umodsi3>
bb70: e0604004 rsb r4, r0, r4
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
const Heap_Block *block
)
{
return (uintptr_t) block + HEAP_BLOCK_HEADER_SIZE;
bb74: e28a2008 add r2, sl, #8
uintptr_t alloc_begin = alloc_end - alloc_size;
alloc_begin = _Heap_Align_down( alloc_begin, alignment );
/* Ensure that we have a valid new block at the end */
if ( alloc_begin > alloc_begin_ceiling ) {
bb78: e1590004 cmp r9, r4
bb7c: e58d2008 str r2, [sp, #8]
bb80: 2a000003 bcs bb94 <_Heap_Allocate_aligned_with_boundary+0xe8>
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
uintptr_t value,
uintptr_t alignment
)
{
return value - (value % alignment);
bb84: e1a00009 mov r0, r9
bb88: e1a01008 mov r1, r8
bb8c: eb002b4c bl 168c4 <__umodsi3>
bb90: e0604009 rsb r4, r0, r9
}
alloc_end = alloc_begin + alloc_size;
/* Ensure boundary constraint */
if ( boundary != 0 ) {
bb94: e35b0000 cmp fp, #0
bb98: 0a000025 beq bc34 <_Heap_Allocate_aligned_with_boundary+0x188>
/* Ensure that we have a valid new block at the end */
if ( alloc_begin > alloc_begin_ceiling ) {
alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
}
alloc_end = alloc_begin + alloc_size;
bb9c: e0849006 add r9, r4, r6
bba0: e1a00009 mov r0, r9
bba4: e1a0100b mov r1, fp
bba8: eb002b45 bl 168c4 <__umodsi3>
bbac: e0600009 rsb r0, r0, r9
/* Ensure boundary constraint */
if ( boundary != 0 ) {
uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
bbb0: e1540000 cmp r4, r0
bbb4: 23a03000 movcs r3, #0
bbb8: 33a03001 movcc r3, #1
bbbc: e1590000 cmp r9, r0
bbc0: 93a03000 movls r3, #0
bbc4: e3530000 cmp r3, #0
bbc8: 0a000019 beq bc34 <_Heap_Allocate_aligned_with_boundary+0x188>
alloc_end = alloc_begin + alloc_size;
/* Ensure boundary constraint */
if ( boundary != 0 ) {
uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
bbcc: e59d3008 ldr r3, [sp, #8]
bbd0: e0839006 add r9, r3, r6
uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
if ( boundary_line < boundary_floor ) {
bbd4: e1590000 cmp r9, r0
bbd8: 958da018 strls sl, [sp, #24]
bbdc: 9a000002 bls bbec <_Heap_Allocate_aligned_with_boundary+0x140>
bbe0: ea00003c b bcd8 <_Heap_Allocate_aligned_with_boundary+0x22c>
bbe4: e1590000 cmp r9, r0
bbe8: 8a00003e bhi bce8 <_Heap_Allocate_aligned_with_boundary+0x23c>
return 0;
}
alloc_begin = boundary_line - alloc_size;
bbec: e0664000 rsb r4, r6, r0
bbf0: e1a01008 mov r1, r8
bbf4: e1a00004 mov r0, r4
bbf8: eb002b31 bl 168c4 <__umodsi3>
bbfc: e0604004 rsb r4, r0, r4
alloc_begin = _Heap_Align_down( alloc_begin, alignment );
alloc_end = alloc_begin + alloc_size;
bc00: e084a006 add sl, r4, r6
bc04: e1a0000a mov r0, sl
bc08: e1a0100b mov r1, fp
bc0c: eb002b2c bl 168c4 <__umodsi3>
bc10: e060000a rsb r0, r0, sl
/* Ensure boundary constraint */
if ( boundary != 0 ) {
uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
bc14: e15a0000 cmp sl, r0
bc18: 93a0a000 movls sl, #0
bc1c: 83a0a001 movhi sl, #1
bc20: e1540000 cmp r4, r0
bc24: 23a0a000 movcs sl, #0
bc28: e35a0000 cmp sl, #0
bc2c: 1affffec bne bbe4 <_Heap_Allocate_aligned_with_boundary+0x138>
bc30: e59da018 ldr sl, [sp, #24]
boundary_line = _Heap_Align_down( alloc_end, boundary );
}
}
/* Ensure that we have a valid new block at the beginning */
if ( alloc_begin >= alloc_begin_floor ) {
bc34: e59d2008 ldr r2, [sp, #8]
bc38: e1520004 cmp r2, r4
bc3c: 8a000025 bhi bcd8 <_Heap_Allocate_aligned_with_boundary+0x22c>
bc40: e1a00004 mov r0, r4
bc44: e59d100c ldr r1, [sp, #12]
bc48: eb002b1d bl 168c4 <__umodsi3>
bc4c: e3e09007 mvn r9, #7
bc50: e06a9009 rsb r9, sl, r9
uintptr_t alloc_begin,
uintptr_t page_size
)
{
return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
- HEAP_BLOCK_HEADER_SIZE);
bc54: e0899004 add r9, r9, r4
if ( free_size >= min_block_size || free_size == 0 ) {
return alloc_begin;
}
}
return 0;
bc58: e59d2004 ldr r2, [sp, #4]
if ( alloc_begin >= alloc_begin_floor ) {
uintptr_t const alloc_block_begin =
(uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
uintptr_t const free_size = alloc_block_begin - block_begin;
if ( free_size >= min_block_size || free_size == 0 ) {
bc5c: e0603009 rsb r3, r0, r9
return alloc_begin;
}
}
return 0;
bc60: e1590000 cmp r9, r0
bc64: 11520003 cmpne r2, r3
bc68: 83a03000 movhi r3, #0
bc6c: 93a03001 movls r3, #1
bc70: 81a04003 movhi r4, r3
}
/* Statistics */
++search_count;
if ( alloc_begin != 0 ) {
bc74: e3540000 cmp r4, #0
);
}
}
/* Statistics */
++search_count;
bc78: e2855001 add r5, r5, #1
if ( alloc_begin != 0 ) {
bc7c: 0affffa4 beq bb14 <_Heap_Allocate_aligned_with_boundary+0x68>
} while ( search_again );
if ( alloc_begin != 0 ) {
/* Statistics */
++stats->allocs;
stats->searches += search_count;
bc80: e2872048 add r2, r7, #72 ; 0x48
bc84: e892000c ldm r2, {r2, r3}
search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
} while ( search_again );
if ( alloc_begin != 0 ) {
/* Statistics */
++stats->allocs;
bc88: e2822001 add r2, r2, #1
stats->searches += search_count;
bc8c: e0833005 add r3, r3, r5
search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
} while ( search_again );
if ( alloc_begin != 0 ) {
/* Statistics */
++stats->allocs;
bc90: e5872048 str r2, [r7, #72] ; 0x48
stats->searches += search_count;
bc94: e587304c str r3, [r7, #76] ; 0x4c
block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
bc98: e1a00007 mov r0, r7
bc9c: e1a0100a mov r1, sl
bca0: e1a02004 mov r2, r4
bca4: e1a03006 mov r3, r6
bca8: ebffef05 bl 78c4 <_Heap_Block_allocate>
boundary
);
}
/* Statistics */
if ( stats->max_search < search_count ) {
bcac: e5973044 ldr r3, [r7, #68] ; 0x44
bcb0: e1530005 cmp r3, r5
stats->max_search = search_count;
bcb4: 35875044 strcc r5, [r7, #68] ; 0x44
}
return (void *) alloc_begin;
bcb8: e1a00004 mov r0, r4
}
bcbc: e28dd01c add sp, sp, #28
bcc0: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc}
/* Integer overflow occurred */
return NULL;
}
if ( boundary != 0 ) {
if ( boundary < alloc_size ) {
bcc4: e1560003 cmp r6, r3
bcc8: 8a000009 bhi bcf4 <_Heap_Allocate_aligned_with_boundary+0x248>
return NULL;
}
if ( alignment == 0 ) {
alignment = page_size;
bccc: e3580000 cmp r8, #0
bcd0: 01a08002 moveq r8, r2
bcd4: eaffff81 b bae0 <_Heap_Allocate_aligned_with_boundary+0x34>
if ( free_size >= min_block_size || free_size == 0 ) {
return alloc_begin;
}
}
return 0;
bcd8: e3a04000 mov r4, #0
bcdc: eaffff89 b bb08 <_Heap_Allocate_aligned_with_boundary+0x5c>
do {
Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
block = _Heap_Free_list_first( heap );
while ( block != free_list_tail ) {
bce0: e3a04000 mov r4, #0
bce4: eafffff0 b bcac <_Heap_Allocate_aligned_with_boundary+0x200>
bce8: e59da018 ldr sl, [sp, #24] <== NOT EXECUTED
uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );
while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
if ( boundary_line < boundary_floor ) {
return 0;
bcec: e3a04000 mov r4, #0 <== NOT EXECUTED
bcf0: eaffff84 b bb08 <_Heap_Allocate_aligned_with_boundary+0x5c><== NOT EXECUTED
return NULL;
}
if ( boundary != 0 ) {
if ( boundary < alloc_size ) {
return NULL;
bcf4: e3a00000 mov r0, #0
bcf8: eaffffef b bcbc <_Heap_Allocate_aligned_with_boundary+0x210>
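_Heap_Allocate_aligned_with_boundary repeatedly calls _Heap_Align_down (value - value % alignment, the __umodsi3 calls above) to pull the candidate allocation start down to the requested alignment while keeping it inside the block. The sketch below illustrates only that alignment step, assuming a non-zero alignment; pick_aligned_begin is a hypothetical helper, not an RTEMS routine, and it ignores the boundary and minimum-block-size checks the real allocator performs.

#include <stdint.h>

/* Same operation as _Heap_Align_down() in the listing: drop the remainder.
 * Assumes alignment != 0. */
static uintptr_t align_down(uintptr_t value, uintptr_t alignment)
{
  return value - (value % alignment);
}

/* Hypothetical helper: highest aligned start that still leaves alloc_size
 * bytes inside [area_begin, area_end).  Returns 0 when it does not fit. */
static uintptr_t pick_aligned_begin(
  uintptr_t area_begin,
  uintptr_t area_end,
  uintptr_t alloc_size,
  uintptr_t alignment
)
{
  if (area_end < area_begin || area_end - area_begin < alloc_size)
    return 0;

  uintptr_t begin = align_down(area_end - alloc_size, alignment);
  return begin >= area_begin ? begin : 0;
}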
0000bcfc <_Heap_Free>:
/*
* If NULL, return true so a free on NULL is considered a valid release. This
* is a special case that could be handled by the in-heap check; however, that
* would result in false being returned, which is wrong.
*/
if ( alloc_begin_ptr == NULL ) {
bcfc: e2513000 subs r3, r1, #0
return do_free;
}
#endif
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
bd00: e92d45f0 push {r4, r5, r6, r7, r8, sl, lr}
bd04: e1a04000 mov r4, r0
* If NULL, return true so a free on NULL is considered a valid release. This
* is a special case that could be handled by the in-heap check; however, that
* would result in false being returned, which is wrong.
*/
if ( alloc_begin_ptr == NULL ) {
return true;
bd08: 03a00001 moveq r0, #1
/*
* If NULL, return true so a free on NULL is considered a valid release. This
* is a special case that could be handled by the in-heap check; however, that
* would result in false being returned, which is wrong.
*/
if ( alloc_begin_ptr == NULL ) {
bd0c: 08bd85f0 popeq {r4, r5, r6, r7, r8, sl, pc}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
uintptr_t value,
uintptr_t alignment
)
{
return value - (value % alignment);
bd10: e1a00003 mov r0, r3
bd14: e5941010 ldr r1, [r4, #16]
bd18: e2435008 sub r5, r3, #8
bd1c: eb002ae8 bl 168c4 <__umodsi3>
RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
bd20: e5942020 ldr r2, [r4, #32]
uintptr_t alloc_begin,
uintptr_t page_size
)
{
return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
- HEAP_BLOCK_HEADER_SIZE);
bd24: e0605005 rsb r5, r0, r5
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
bd28: e1550002 cmp r5, r2
bd2c: 3a00002f bcc bdf0 <_Heap_Free+0xf4>
bd30: e5941024 ldr r1, [r4, #36] ; 0x24
bd34: e1550001 cmp r5, r1
bd38: 8a00002c bhi bdf0 <_Heap_Free+0xf4>
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
bd3c: e595c004 ldr ip, [r5, #4]
bd40: e3cc6001 bic r6, ip, #1
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
bd44: e0853006 add r3, r5, r6
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
bd48: e1520003 cmp r2, r3
bd4c: 8a000027 bhi bdf0 <_Heap_Free+0xf4>
bd50: e1510003 cmp r1, r3
bd54: 3a000027 bcc bdf8 <_Heap_Free+0xfc>
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
bd58: e5937004 ldr r7, [r3, #4]
return false;
}
_Heap_Protection_block_check( heap, next_block );
if ( !_Heap_Is_prev_used( next_block ) ) {
bd5c: e2170001 ands r0, r7, #1
bd60: 08bd85f0 popeq {r4, r5, r6, r7, r8, sl, pc}
return true;
}
next_block_size = _Heap_Block_size( next_block );
next_is_free = next_block != heap->last_block
&& !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
bd64: e1510003 cmp r1, r3
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
bd68: e3c77001 bic r7, r7, #1
bd6c: 03a08000 moveq r8, #0
bd70: 0a000004 beq bd88 <_Heap_Free+0x8c>
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
bd74: e0830007 add r0, r3, r7
bd78: e5900004 ldr r0, [r0, #4]
return do_free;
}
#endif
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
bd7c: e3100001 tst r0, #1
bd80: 13a08000 movne r8, #0
bd84: 03a08001 moveq r8, #1
next_block_size = _Heap_Block_size( next_block );
next_is_free = next_block != heap->last_block
&& !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
if ( !_Heap_Is_prev_used( block ) ) {
bd88: e21c0001 ands r0, ip, #1
bd8c: 1a00001b bne be00 <_Heap_Free+0x104>
uintptr_t const prev_size = block->prev_size;
bd90: e595c000 ldr ip, [r5]
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
bd94: e06ca005 rsb sl, ip, r5
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
bd98: e152000a cmp r2, sl
bd9c: 88bd85f0 pophi {r4, r5, r6, r7, r8, sl, pc}
bda0: e151000a cmp r1, sl
bda4: 38bd85f0 popcc {r4, r5, r6, r7, r8, sl, pc}
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
bda8: e59a0004 ldr r0, [sl, #4]
return( false );
}
/* As we always coalesce free blocks, the block that precedes prev_block
must have been used. */
if ( !_Heap_Is_prev_used ( prev_block) ) {
bdac: e2100001 ands r0, r0, #1
bdb0: 08bd85f0 popeq {r4, r5, r6, r7, r8, sl, pc}
_HAssert( false );
return( false );
}
if ( next_is_free ) { /* coalesce both */
bdb4: e3580000 cmp r8, #0
bdb8: 0a000039 beq bea4 <_Heap_Free+0x1a8>
uintptr_t const size = block_size + prev_size + next_block_size;
_Heap_Free_list_remove( next_block );
stats->free_blocks -= 1;
bdbc: e5940038 ldr r0, [r4, #56] ; 0x38
_HAssert( false );
return( false );
}
if ( next_is_free ) { /* coalesce both */
uintptr_t const size = block_size + prev_size + next_block_size;
bdc0: e0867007 add r7, r6, r7
}
RTEMS_INLINE_ROUTINE void _Heap_Free_list_remove( Heap_Block *block )
{
Heap_Block *next = block->next;
Heap_Block *prev = block->prev;
bdc4: e2832008 add r2, r3, #8
bdc8: e892000c ldm r2, {r2, r3}
bdcc: e087c00c add ip, r7, ip
_Heap_Free_list_remove( next_block );
stats->free_blocks -= 1;
bdd0: e2400001 sub r0, r0, #1
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
bdd4: e38c1001 orr r1, ip, #1
prev->next = next;
bdd8: e5832008 str r2, [r3, #8]
next->prev = prev;
bddc: e582300c str r3, [r2, #12]
}
if ( next_is_free ) { /* coalesce both */
uintptr_t const size = block_size + prev_size + next_block_size;
_Heap_Free_list_remove( next_block );
stats->free_blocks -= 1;
bde0: e5840038 str r0, [r4, #56] ; 0x38
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
bde4: e58a1004 str r1, [sl, #4]
next_block = _Heap_Block_at( prev_block, size );
_HAssert(!_Heap_Is_prev_used( next_block));
next_block->prev_size = size;
bde8: e78ac00c str ip, [sl, ip]
bdec: ea00000f b be30 <_Heap_Free+0x134>
block_size = _Heap_Block_size( block );
next_block = _Heap_Block_at( block, block_size );
if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
return false;
bdf0: e3a00000 mov r0, #0
bdf4: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
bdf8: e3a00000 mov r0, #0 <== NOT EXECUTED
--stats->used_blocks;
++stats->frees;
stats->free_size += block_size;
return( true );
}
bdfc: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc} <== NOT EXECUTED
uintptr_t const size = block_size + prev_size;
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
next_block->prev_size = size;
}
} else if ( next_is_free ) { /* coalesce next */
be00: e3580000 cmp r8, #0
be04: 0a000014 beq be5c <_Heap_Free+0x160>
Heap_Block *old_block,
Heap_Block *new_block
)
{
Heap_Block *next = old_block->next;
Heap_Block *prev = old_block->prev;
be08: e2832008 add r2, r3, #8
be0c: e892000c ldm r2, {r2, r3}
uintptr_t const size = block_size + next_block_size;
be10: e0877006 add r7, r7, r6
_Heap_Free_list_replace( next_block, block );
block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
be14: e3871001 orr r1, r7, #1
new_block->next = next;
be18: e5852008 str r2, [r5, #8]
new_block->prev = prev;
be1c: e585300c str r3, [r5, #12]
next->prev = new_block;
prev->next = new_block;
be20: e5835008 str r5, [r3, #8]
Heap_Block *prev = old_block->prev;
new_block->next = next;
new_block->prev = prev;
next->prev = new_block;
be24: e582500c str r5, [r2, #12]
be28: e5851004 str r1, [r5, #4]
next_block = _Heap_Block_at( block, size );
next_block->prev_size = size;
be2c: e7857007 str r7, [r5, r7]
stats->max_free_blocks = stats->free_blocks;
}
}
/* Statistics */
--stats->used_blocks;
be30: e5942040 ldr r2, [r4, #64] ; 0x40
++stats->frees;
be34: e5943050 ldr r3, [r4, #80] ; 0x50
stats->free_size += block_size;
be38: e5941030 ldr r1, [r4, #48] ; 0x30
stats->max_free_blocks = stats->free_blocks;
}
}
/* Statistics */
--stats->used_blocks;
be3c: e2422001 sub r2, r2, #1
++stats->frees;
be40: e2833001 add r3, r3, #1
stats->free_size += block_size;
be44: e0816006 add r6, r1, r6
stats->max_free_blocks = stats->free_blocks;
}
}
/* Statistics */
--stats->used_blocks;
be48: e5842040 str r2, [r4, #64] ; 0x40
++stats->frees;
be4c: e5843050 str r3, [r4, #80] ; 0x50
stats->free_size += block_size;
be50: e5846030 str r6, [r4, #48] ; 0x30
return( true );
be54: e3a00001 mov r0, #1
be58: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
next_block->prev_size = size;
} else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
_Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
be5c: e3862001 orr r2, r6, #1
be60: e5852004 str r2, [r5, #4]
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
next_block->prev_size = block_size;
/* Statistics */
++stats->free_blocks;
be64: e5942038 ldr r2, [r4, #56] ; 0x38
if ( stats->max_free_blocks < stats->free_blocks ) {
be68: e594c03c ldr ip, [r4, #60] ; 0x3c
} else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
_Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
be6c: e5930004 ldr r0, [r3, #4]
RTEMS_INLINE_ROUTINE void _Heap_Free_list_insert_after(
Heap_Block *block_before,
Heap_Block *new_block
)
{
Heap_Block *next = block_before->next;
be70: e5941008 ldr r1, [r4, #8]
next_block->prev_size = block_size;
/* Statistics */
++stats->free_blocks;
be74: e2822001 add r2, r2, #1
} else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
_Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
be78: e3c00001 bic r0, r0, #1
next_block->prev_size = block_size;
/* Statistics */
++stats->free_blocks;
if ( stats->max_free_blocks < stats->free_blocks ) {
be7c: e152000c cmp r2, ip
new_block->next = next;
be80: e5851008 str r1, [r5, #8]
new_block->prev = block_before;
be84: e585400c str r4, [r5, #12]
} else { /* no coalesce */
/* Add 'block' to the head of the free blocks list as it tends to
produce less fragmentation than adding to the tail. */
_Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
be88: e5830004 str r0, [r3, #4]
block_before->next = new_block;
next->prev = new_block;
be8c: e581500c str r5, [r1, #12]
next_block->prev_size = block_size;
be90: e7856006 str r6, [r5, r6]
{
Heap_Block *next = block_before->next;
new_block->next = next;
new_block->prev = block_before;
block_before->next = new_block;
be94: e5845008 str r5, [r4, #8]
/* Statistics */
++stats->free_blocks;
be98: e5842038 str r2, [r4, #56] ; 0x38
if ( stats->max_free_blocks < stats->free_blocks ) {
stats->max_free_blocks = stats->free_blocks;
be9c: 8584203c strhi r2, [r4, #60] ; 0x3c
bea0: eaffffe2 b be30 <_Heap_Free+0x134>
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
next_block = _Heap_Block_at( prev_block, size );
_HAssert(!_Heap_Is_prev_used( next_block));
next_block->prev_size = size;
} else { /* coalesce prev */
uintptr_t const size = block_size + prev_size;
bea4: e086c00c add ip, r6, ip
prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
bea8: e38c2001 orr r2, ip, #1
beac: e58a2004 str r2, [sl, #4]
next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
beb0: e5932004 ldr r2, [r3, #4]
beb4: e3c22001 bic r2, r2, #1
beb8: e5832004 str r2, [r3, #4]
next_block->prev_size = size;
bebc: e785c006 str ip, [r5, r6]
bec0: eaffffda b be30 <_Heap_Free+0x134>
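_Heap_Free above decides between four cases (coalesce both neighbours, coalesce previous, coalesce next, no coalescing) using boundary tags: the low bit of size_and_flag records whether the previous block is in use, and prev_size holds the previous block's size while it is free. The following sketch shows just that coalescing arithmetic with simplified types; it omits the free-list surgery and statistics updates of the real routine, and it takes next_is_free as a parameter instead of re-deriving it from the block after next and heap->last_block.

#include <stdbool.h>
#include <stdint.h>

typedef struct {
  uintptr_t prev_size;        /* valid only while the previous block is free */
  uintptr_t size_and_flag;    /* low bit set: the previous block is in use */
} block_t;

#define PREV_USED ((uintptr_t) 1)

static uintptr_t block_size(const block_t *b)
{
  return b->size_and_flag & ~PREV_USED;
}

/* Merge a block being freed with its free neighbours.  next_is_free must be
 * supplied by the caller. */
static void coalesce(block_t *b, bool next_is_free)
{
  uintptr_t size = block_size(b);
  block_t *next = (block_t *) ((uintptr_t) b + size);

  if ((b->size_and_flag & PREV_USED) == 0) {
    /* Backward merge: prev_size tells us where the free predecessor starts. */
    uintptr_t prev_size = b->prev_size;
    b = (block_t *) ((uintptr_t) b - prev_size);
    size += prev_size;
  }
  if (next_is_free) {
    /* Forward merge: absorb the next block as well. */
    size += block_size(next);
  }

  /* Publish the merged block and its boundary tag for later backward merges. */
  b->size_and_flag = size | PREV_USED;
  block_t *after = (block_t *) ((uintptr_t) b + size);
  after->prev_size = size;
  after->size_and_flag &= ~PREV_USED;  /* the block before 'after' is now free */
}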
0000961c <_Heap_Greedy_allocate>:
Heap_Block *_Heap_Greedy_allocate(
Heap_Control *heap,
const uintptr_t *block_sizes,
size_t block_count
)
{
961c: e92d45f0 push {r4, r5, r6, r7, r8, sl, lr}
Heap_Block *allocated_blocks = NULL;
Heap_Block *blocks = NULL;
Heap_Block *current;
size_t i;
for (i = 0; i < block_count; ++i) {
9620: e2528000 subs r8, r2, #0
Heap_Block *_Heap_Greedy_allocate(
Heap_Control *heap,
const uintptr_t *block_sizes,
size_t block_count
)
{
9624: e1a04000 mov r4, r0
Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
Heap_Block *allocated_blocks = NULL;
9628: 01a05008 moveq r5, r8
Heap_Block *blocks = NULL;
Heap_Block *current;
size_t i;
for (i = 0; i < block_count; ++i) {
962c: 0a000012 beq 967c <_Heap_Greedy_allocate+0x60>
9630: e3a06000 mov r6, #0
9634: e1a07001 mov r7, r1
const uintptr_t *block_sizes,
size_t block_count
)
{
Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
Heap_Block *allocated_blocks = NULL;
9638: e1a05006 mov r5, r6
* @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
* boundary equals zero.
*/
RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
{
return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
963c: e3a02000 mov r2, #0
9640: e4971004 ldr r1, [r7], #4
9644: e1a03002 mov r3, r2
9648: e1a00004 mov r0, r4
964c: eb001be1 bl 105d8 <_Heap_Allocate_aligned_with_boundary>
size_t i;
for (i = 0; i < block_count; ++i) {
void *next = _Heap_Allocate( heap, block_sizes [i] );
if ( next != NULL ) {
9650: e250a000 subs sl, r0, #0
Heap_Block *allocated_blocks = NULL;
Heap_Block *blocks = NULL;
Heap_Block *current;
size_t i;
for (i = 0; i < block_count; ++i) {
9654: e2866001 add r6, r6, #1
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
uintptr_t value,
uintptr_t alignment
)
{
return value - (value % alignment);
9658: e24aa008 sub sl, sl, #8
void *next = _Heap_Allocate( heap, block_sizes [i] );
if ( next != NULL ) {
965c: 0a000004 beq 9674 <_Heap_Greedy_allocate+0x58>
9660: e5941010 ldr r1, [r4, #16]
9664: eb004755 bl 1b3c0 <__umodsi3>
uintptr_t alloc_begin,
uintptr_t page_size
)
{
return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
- HEAP_BLOCK_HEADER_SIZE);
9668: e060000a rsb r0, r0, sl
Heap_Block *next_block = _Heap_Block_of_alloc_area(
(uintptr_t) next,
heap->page_size
);
next_block->next = allocated_blocks;
966c: e5805008 str r5, [r0, #8]
9670: e1a05000 mov r5, r0
Heap_Block *allocated_blocks = NULL;
Heap_Block *blocks = NULL;
Heap_Block *current;
size_t i;
for (i = 0; i < block_count; ++i) {
9674: e1560008 cmp r6, r8
9678: 1affffef bne 963c <_Heap_Greedy_allocate+0x20>
return &heap->free_list;
}
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
{
return _Heap_Free_list_head(heap)->next;
967c: e5946008 ldr r6, [r4, #8]
next_block->next = allocated_blocks;
allocated_blocks = next_block;
}
}
while ( (current = _Heap_Free_list_first( heap )) != free_list_tail ) {
9680: e1540006 cmp r4, r6
9684: 13a07000 movne r7, #0
9688: 1a000002 bne 9698 <_Heap_Greedy_allocate+0x7c>
968c: ea000018 b 96f4 <_Heap_Greedy_allocate+0xd8> <== NOT EXECUTED
9690: e1a07006 mov r7, r6
9694: e1a06003 mov r6, r3
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
9698: e5963004 ldr r3, [r6, #4]
969c: e3c33001 bic r3, r3, #1
_Heap_Block_allocate(
96a0: e2433008 sub r3, r3, #8
96a4: e1a01006 mov r1, r6
96a8: e2862008 add r2, r6, #8
96ac: e1a00004 mov r0, r4
96b0: eb0000d0 bl 99f8 <_Heap_Block_allocate>
current,
_Heap_Alloc_area_of_block( current ),
_Heap_Block_size( current ) - HEAP_BLOCK_HEADER_SIZE
);
current->next = blocks;
96b4: e5867008 str r7, [r6, #8]
return &heap->free_list;
}
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_first( Heap_Control *heap )
{
return _Heap_Free_list_head(heap)->next;
96b8: e5943008 ldr r3, [r4, #8]
next_block->next = allocated_blocks;
allocated_blocks = next_block;
}
}
while ( (current = _Heap_Free_list_first( heap )) != free_list_tail ) {
96bc: e1540003 cmp r4, r3
96c0: 1afffff2 bne 9690 <_Heap_Greedy_allocate+0x74>
current->next = blocks;
blocks = current;
}
while ( allocated_blocks != NULL ) {
96c4: e3550000 cmp r5, #0
96c8: 1a000001 bne 96d4 <_Heap_Greedy_allocate+0xb8>
96cc: ea000006 b 96ec <_Heap_Greedy_allocate+0xd0>
current = allocated_blocks;
allocated_blocks = allocated_blocks->next;
96d0: e1a05007 mov r5, r7
96d4: e1a01005 mov r1, r5
96d8: e5b17008 ldr r7, [r1, #8]!
_Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( current ) );
96dc: e1a00004 mov r0, r4
96e0: eb001c50 bl 10828 <_Heap_Free>
current->next = blocks;
blocks = current;
}
while ( allocated_blocks != NULL ) {
96e4: e3570000 cmp r7, #0
96e8: 1afffff8 bne 96d0 <_Heap_Greedy_allocate+0xb4>
allocated_blocks = allocated_blocks->next;
_Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( current ) );
}
return blocks;
}
96ec: e1a00006 mov r0, r6
96f0: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
next_block->next = allocated_blocks;
allocated_blocks = next_block;
}
}
while ( (current = _Heap_Free_list_first( heap )) != free_list_tail ) {
96f4: e3a06000 mov r6, #0 <== NOT EXECUTED
96f8: eafffff1 b 96c4 <_Heap_Greedy_allocate+0xa8> <== NOT EXECUTED
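_Heap_Greedy_allocate reserves the caller-requested sizes, then drains every remaining free block, chaining each block through its next field so the requested holes can later be re-created by freeing the reserved allocations. A rough illustration of that reserve/release pattern is sketched below using malloc/free as stand-ins for the workspace heap calls; it is not the RTEMS implementation and skips the step that claims the remaining free blocks.

#include <stdlib.h>

struct link {
  struct link *next;
};

/* Reserve the requested sizes and chain them head-first, reusing the
 * allocation itself to hold the link (as the RTEMS code reuses block
 * headers). */
static struct link *greedy_reserve(const size_t *sizes, size_t count)
{
  struct link *reserved = NULL;

  for (size_t i = 0; i < count; ++i) {
    size_t n = sizes[i] < sizeof(struct link) ? sizeof(struct link) : sizes[i];
    struct link *item = malloc(n);
    if (item != NULL) {
      item->next = reserved;
      reserved = item;
    }
  }
  return reserved;
}

/* Give the reserved sizes back, mirroring the final loop of the routine. */
static void greedy_release(struct link *reserved)
{
  while (reserved != NULL) {
    struct link *current = reserved;
    reserved = reserved->next;
    free(current);
  }
}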
0001136c <_Heap_Iterate>:
Heap_Block_visitor visitor,
void *visitor_arg
)
{
Heap_Block *current = heap->first_block;
Heap_Block *end = heap->last_block;
1136c: e2800020 add r0, r0, #32
void _Heap_Iterate(
Heap_Control *heap,
Heap_Block_visitor visitor,
void *visitor_arg
)
{
11370: e92d40f0 push {r4, r5, r6, r7, lr}
Heap_Block *current = heap->first_block;
Heap_Block *end = heap->last_block;
11374: e8900021 ldm r0, {r0, r5}
bool stop = false;
while ( !stop && current != end ) {
11378: e1500005 cmp r0, r5
void _Heap_Iterate(
Heap_Control *heap,
Heap_Block_visitor visitor,
void *visitor_arg
)
{
1137c: e1a07001 mov r7, r1
11380: e1a06002 mov r6, r2
Heap_Block *current = heap->first_block;
Heap_Block *end = heap->last_block;
bool stop = false;
while ( !stop && current != end ) {
11384: 1a000001 bne 11390 <_Heap_Iterate+0x24>
11388: e8bd80f0 pop {r4, r5, r6, r7, pc} <== NOT EXECUTED
1138c: e1a00004 mov r0, r4
11390: e5901004 ldr r1, [r0, #4]
11394: e3c11001 bic r1, r1, #1
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
11398: e0804001 add r4, r0, r1
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
1139c: e5942004 ldr r2, [r4, #4]
uintptr_t size = _Heap_Block_size( current );
Heap_Block *next = _Heap_Block_at( current, size );
bool used = _Heap_Is_prev_used( next );
stop = (*visitor)( current, size, used, visitor_arg );
113a0: e1a03006 mov r3, r6
113a4: e2022001 and r2, r2, #1
113a8: e1a0e00f mov lr, pc
113ac: e12fff17 bx r7
{
Heap_Block *current = heap->first_block;
Heap_Block *end = heap->last_block;
bool stop = false;
while ( !stop && current != end ) {
113b0: e3500000 cmp r0, #0
113b4: 18bd80f0 popne {r4, r5, r6, r7, pc}
113b8: e1550004 cmp r5, r4
113bc: 1afffff2 bne 1138c <_Heap_Iterate+0x20>
113c0: e8bd80f0 pop {r4, r5, r6, r7, pc}
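_Heap_Iterate walks from first_block to last_block and hands each block's address, size, and used flag to the visitor, stopping early when the visitor returns true. Below is a hedged usage sketch of a visitor with that shape which tallies used versus free bytes; the heap_totals type and count_visitor name are invented for the example, and the block parameter is typed as const void * here only because the sketch does not pull in the Heap_Block definition.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
  uintptr_t used_bytes;
  uintptr_t free_bytes;
} heap_totals;

/* Visitor matching the argument order the listing shows:
 * (block address, block size, used flag, visitor argument). */
static bool count_visitor(
  const void *block,
  uintptr_t   block_size,
  bool        block_is_used,
  void       *visitor_arg
)
{
  heap_totals *totals = visitor_arg;

  (void) block;
  if (block_is_used)
    totals->used_bytes += block_size;
  else
    totals->free_bytes += block_size;

  return false;   /* false keeps the walk going until last_block */
}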
0000bff8 <_Heap_Size_of_alloc_area>:
bool _Heap_Size_of_alloc_area(
Heap_Control *heap,
void *alloc_begin_ptr,
uintptr_t *alloc_size
)
{
bff8: e92d4070 push {r4, r5, r6, lr}
bffc: e1a04000 mov r4, r0
c000: e1a05001 mov r5, r1
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
uintptr_t value,
uintptr_t alignment
)
{
return value - (value % alignment);
c004: e1a00001 mov r0, r1
c008: e5941010 ldr r1, [r4, #16]
c00c: e1a06002 mov r6, r2
c010: eb002a2b bl 168c4 <__umodsi3>
c014: e2452008 sub r2, r5, #8
RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
c018: e5943020 ldr r3, [r4, #32]
uintptr_t alloc_begin,
uintptr_t page_size
)
{
return (Heap_Block *) (_Heap_Align_down( alloc_begin, page_size )
- HEAP_BLOCK_HEADER_SIZE);
c01c: e0602002 rsb r2, r0, r2
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
c020: e1520003 cmp r2, r3
c024: 3a000010 bcc c06c <_Heap_Size_of_alloc_area+0x74>
c028: e5941024 ldr r1, [r4, #36] ; 0x24
c02c: e1520001 cmp r2, r1
c030: 8a00000d bhi c06c <_Heap_Size_of_alloc_area+0x74>
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
c034: e5920004 ldr r0, [r2, #4]
c038: e3c00001 bic r0, r0, #1
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
c03c: e0822000 add r2, r2, r0
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
c040: e1530002 cmp r3, r2
c044: 8a000008 bhi c06c <_Heap_Size_of_alloc_area+0x74>
c048: e1510002 cmp r1, r2
c04c: 3a000008 bcc c074 <_Heap_Size_of_alloc_area+0x7c>
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
c050: e5920004 ldr r0, [r2, #4]
block_size = _Heap_Block_size( block );
next_block = _Heap_Block_at( block, block_size );
if (
!_Heap_Is_block_in_heap( heap, next_block )
|| !_Heap_Is_prev_used( next_block )
c054: e2100001 ands r0, r0, #1
) {
return false;
}
*alloc_size = (uintptr_t) next_block + HEAP_ALLOC_BONUS - alloc_begin;
c058: 12655004 rsbne r5, r5, #4
c05c: 10852002 addne r2, r5, r2
c060: 15862000 strne r2, [r6]
return true;
c064: 13a00001 movne r0, #1
c068: e8bd8070 pop {r4, r5, r6, pc}
if (
!_Heap_Is_block_in_heap( heap, next_block )
|| !_Heap_Is_prev_used( next_block )
) {
return false;
c06c: e3a00000 mov r0, #0
c070: e8bd8070 pop {r4, r5, r6, pc}
c074: e3a00000 mov r0, #0 <== NOT EXECUTED
}
*alloc_size = (uintptr_t) next_block + HEAP_ALLOC_BONUS - alloc_begin;
return true;
}
c078: e8bd8070 pop {r4, r5, r6, pc} <== NOT EXECUTED
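_Heap_Size_of_alloc_area recovers the block header from a user pointer by aligning the pointer down to the page size and stepping back over the block header, then reports the usable size up to the next block plus the allocation bonus. The sketch below redoes that arithmetic with the header size (8) and bonus (4) assumed from the immediates in the listing; the constant names are not the RTEMS macros, and the in-heap validity checks are omitted.

#include <stdint.h>

#define BLOCK_HEADER_SIZE ((uintptr_t) 8)  /* assumed from the "sub ..., #8" above */
#define ALLOC_BONUS       ((uintptr_t) 4)  /* assumed from the "rsb ..., #4" above */

static uintptr_t align_down(uintptr_t value, uintptr_t alignment)
{
  return value - (value % alignment);
}

/* Recover the usable size of an allocation from the user pointer, the
 * heap page size, and the size stored in the owning block's header. */
static uintptr_t size_of_alloc_area(
  uintptr_t alloc_begin,
  uintptr_t page_size,
  uintptr_t block_size   /* size_and_flag with the flag bit already cleared */
)
{
  uintptr_t block      = align_down(alloc_begin, page_size) - BLOCK_HEADER_SIZE;
  uintptr_t next_block = block + block_size;

  return next_block + ALLOC_BONUS - alloc_begin;
}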
00007a04 <_Internal_error_Occurred>:
void _Internal_error_Occurred(
Internal_errors_Source the_source,
bool is_internal,
Internal_errors_t the_error
)
{
7a04: e52de004 push {lr} ; (str lr, [sp, #-4]!)
7a08: e24dd00c sub sp, sp, #12
7a0c: e1a04000 mov r4, r0
7a10: e20160ff and r6, r1, #255 ; 0xff
Internal_errors_t error
)
{
User_extensions_Fatal_context ctx = { source, is_internal, error };
_User_extensions_Iterate( &ctx, _User_extensions_Fatal_visitor );
7a14: e1a0000d mov r0, sp
7a18: e59f1040 ldr r1, [pc, #64] ; 7a60 <_Internal_error_Occurred+0x5c>
7a1c: e1a05002 mov r5, r2
Internal_errors_Source source,
bool is_internal,
Internal_errors_t error
)
{
User_extensions_Fatal_context ctx = { source, is_internal, error };
7a20: e58d2008 str r2, [sp, #8]
7a24: e58d4000 str r4, [sp]
7a28: e5cd6004 strb r6, [sp, #4]
_User_extensions_Iterate( &ctx, _User_extensions_Fatal_visitor );
7a2c: eb0007be bl 992c <_User_extensions_Iterate>
_User_extensions_Fatal( the_source, is_internal, the_error );
_Internal_errors_What_happened.the_source = the_source;
7a30: e59f302c ldr r3, [pc, #44] ; 7a64 <_Internal_error_Occurred+0x60><== NOT EXECUTED
7a34: e5834000 str r4, [r3] <== NOT EXECUTED
_Internal_errors_What_happened.is_internal = is_internal;
7a38: e5c36004 strb r6, [r3, #4] <== NOT EXECUTED
_Internal_errors_What_happened.the_error = the_error;
7a3c: e5835008 str r5, [r3, #8] <== NOT EXECUTED
RTEMS_INLINE_ROUTINE void _System_state_Set (
System_state_Codes state
)
{
_System_state_Current = state;
7a40: e59f3020 ldr r3, [pc, #32] ; 7a68 <_Internal_error_Occurred+0x64><== NOT EXECUTED
7a44: e3a02005 mov r2, #5 <== NOT EXECUTED
7a48: e5832000 str r2, [r3] <== NOT EXECUTED
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
7a4c: e10f2000 mrs r2, CPSR <== NOT EXECUTED
7a50: e3823080 orr r3, r2, #128 ; 0x80 <== NOT EXECUTED
7a54: e129f003 msr CPSR_fc, r3 <== NOT EXECUTED
_System_state_Set( SYSTEM_STATE_FAILED );
_CPU_Fatal_halt( the_error );
7a58: e1a00005 mov r0, r5 <== NOT EXECUTED
7a5c: eafffffe b 7a5c <_Internal_error_Occurred+0x58> <== NOT EXECUTED
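_Internal_error_Occurred notifies the fatal user extensions, records the error, marks the system state failed, masks interrupts via CPSR, and spins forever. A hedged, architecture-neutral sketch of that shape follows; all names are placeholders, the user-extension step is omitted, and the interrupt masking is only indicated by a comment.

#include <stdbool.h>
#include <stdint.h>

struct error_record {
  int      source;
  bool     is_internal;
  uint32_t code;
};

static volatile struct error_record last_error;
static volatile int system_state;

static void fatal_error(int source, bool is_internal, uint32_t code)
{
  /* Record what happened, as _Internal_errors_What_happened does above. */
  last_error.source      = source;
  last_error.is_internal = is_internal;
  last_error.code        = code;

  /* Mark the system failed (SYSTEM_STATE_FAILED is 5 in the listing). */
  system_state = 5;

  /* The real code masks interrupts here (the CPSR orr #0x80 sequence),
   * then halts; this sketch simply spins. */
  for (;;) {
    /* halt */
  }
}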
00007b20 <_Objects_Extend_information>:
*/
void _Objects_Extend_information(
Objects_Information *information
)
{
7b20: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
minimum_index = _Objects_Get_index( information->minimum_id );
index_base = minimum_index;
block = 0;
/* if ( information->maximum < minimum_index ) */
if ( information->object_blocks == NULL )
7b24: e5904034 ldr r4, [r0, #52] ; 0x34
7b28: e3540000 cmp r4, #0
*/
void _Objects_Extend_information(
Objects_Information *information
)
{
7b2c: e24dd014 sub sp, sp, #20
7b30: e1a05000 mov r5, r0
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
minimum_index = _Objects_Get_index( information->minimum_id );
7b34: e1d070b8 ldrh r7, [r0, #8]
index_base = minimum_index;
block = 0;
/* if ( information->maximum < minimum_index ) */
if ( information->object_blocks == NULL )
7b38: e1d081b0 ldrh r8, [r0, #16]
7b3c: 0a0000a0 beq 7dc4 <_Objects_Extend_information+0x2a4>
block_count = 0;
else {
block_count = information->maximum / information->allocation_size;
7b40: e1d0a1b4 ldrh sl, [r0, #20]
7b44: e1a00008 mov r0, r8
7b48: e1a0100a mov r1, sl
7b4c: eb003b16 bl 167ac <__aeabi_uidiv>
7b50: e1a0b800 lsl fp, r0, #16
for ( ; block < block_count; block++ ) {
7b54: e1b0b82b lsrs fp, fp, #16
7b58: 0a0000a1 beq 7de4 <_Objects_Extend_information+0x2c4>
if ( information->object_blocks[ block ] == NULL ) {
7b5c: e5949000 ldr r9, [r4]
7b60: e3590000 cmp r9, #0
7b64: 11a03004 movne r3, r4
7b68: e1a0200a mov r2, sl
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
minimum_index = _Objects_Get_index( information->minimum_id );
7b6c: 11a06007 movne r6, r7
index_base = minimum_index;
block = 0;
7b70: 13a04000 movne r4, #0
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
minimum_index = _Objects_Get_index( information->minimum_id );
7b74: 01a06007 moveq r6, r7
index_base = minimum_index;
block = 0;
7b78: 01a04009 moveq r4, r9
block_count = 0;
else {
block_count = information->maximum / information->allocation_size;
for ( ; block < block_count; block++ ) {
if ( information->object_blocks[ block ] == NULL ) {
7b7c: 1a000003 bne 7b90 <_Objects_Extend_information+0x70>
7b80: ea000007 b 7ba4 <_Objects_Extend_information+0x84> <== NOT EXECUTED
7b84: e5b39004 ldr r9, [r3, #4]!
7b88: e3590000 cmp r9, #0
7b8c: 0a000004 beq 7ba4 <_Objects_Extend_information+0x84>
if ( information->object_blocks == NULL )
block_count = 0;
else {
block_count = information->maximum / information->allocation_size;
for ( ; block < block_count; block++ ) {
7b90: e2844001 add r4, r4, #1
7b94: e15b0004 cmp fp, r4
if ( information->object_blocks[ block ] == NULL ) {
do_extend = false;
break;
} else
index_base += information->allocation_size;
7b98: e086600a add r6, r6, sl
if ( information->object_blocks == NULL )
block_count = 0;
else {
block_count = information->maximum / information->allocation_size;
for ( ; block < block_count; block++ ) {
7b9c: 8afffff8 bhi 7b84 <_Objects_Extend_information+0x64>
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
7ba0: e3a09001 mov r9, #1
} else
index_base += information->allocation_size;
}
}
maximum = (uint32_t) information->maximum + information->allocation_size;
7ba4: e0888002 add r8, r8, r2
/*
* We need to limit the number of objects to the maximum number
* representable in the index portion of the object Id. In the
* case of 16-bit Ids, this is only 256 object instances.
*/
if ( maximum > OBJECTS_ID_FINAL_INDEX ) {
7ba8: e3580801 cmp r8, #65536 ; 0x10000
7bac: 2a000063 bcs 7d40 <_Objects_Extend_information+0x220>
/*
* Allocate the name table and the objects, and if it fails either return or
* generate a fatal error, depending on whether auto-extending is active.
*/
block_size = information->allocation_size * information->size;
if ( information->auto_extend ) {
7bb0: e5d53012 ldrb r3, [r5, #18]
/*
* Allocate the name table and the objects, and if it fails either return or
* generate a fatal error, depending on whether auto-extending is active.
*/
block_size = information->allocation_size * information->size;
7bb4: e5950018 ldr r0, [r5, #24]
if ( information->auto_extend ) {
7bb8: e3530000 cmp r3, #0
/*
* Allocate the name table and the objects, and if it fails either return or
* generate a fatal error, depending on whether auto-extending is active.
*/
block_size = information->allocation_size * information->size;
7bbc: e0000092 mul r0, r2, r0
if ( information->auto_extend ) {
7bc0: 1a000060 bne 7d48 <_Objects_Extend_information+0x228>
new_object_block = _Workspace_Allocate( block_size );
if ( !new_object_block )
return;
} else {
new_object_block = _Workspace_Allocate_or_fatal_error( block_size );
7bc4: eb0008b3 bl 9e98 <_Workspace_Allocate_or_fatal_error>
7bc8: e58d0004 str r0, [sp, #4]
}
/*
* Do we need to grow the tables?
*/
if ( do_extend ) {
7bcc: e3590000 cmp r9, #0
7bd0: 0a000039 beq 7cbc <_Objects_Extend_information+0x19c>
*/
/*
* Up the block count and maximum
*/
block_count++;
7bd4: e28b9001 add r9, fp, #1
/*
* Allocate the tables and break it up.
*/
block_size = block_count *
(sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
7bd8: e0890089 add r0, r9, r9, lsl #1
((maximum + minimum_index) * sizeof(Objects_Control *));
if ( information->auto_extend ) {
7bdc: e5d53012 ldrb r3, [r5, #18]
/*
* Allocate the tables and break it up.
*/
block_size = block_count *
(sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
((maximum + minimum_index) * sizeof(Objects_Control *));
7be0: e0880000 add r0, r8, r0
/*
* Allocate the tables and break it up.
*/
block_size = block_count *
(sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
7be4: e0800007 add r0, r0, r7
((maximum + minimum_index) * sizeof(Objects_Control *));
if ( information->auto_extend ) {
7be8: e3530000 cmp r3, #0
block_count++;
/*
* Allocate the tables and break it up.
*/
block_size = block_count *
7bec: e1a00100 lsl r0, r0, #2
(sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
((maximum + minimum_index) * sizeof(Objects_Control *));
if ( information->auto_extend ) {
7bf0: 0a000059 beq 7d5c <_Objects_Extend_information+0x23c>
object_blocks = _Workspace_Allocate( block_size );
7bf4: eb00089d bl 9e70 <_Workspace_Allocate>
if ( !object_blocks ) {
7bf8: e250a000 subs sl, r0, #0
7bfc: 0a000075 beq 7dd8 <_Objects_Extend_information+0x2b8>
* Take the block count down. This saves writing (block_count - 1)
* in all the copies.
*/
block_count--;
if ( information->maximum > minimum_index ) {
7c00: e1d531b0 ldrh r3, [r5, #16]
7c04: e1570003 cmp r7, r3
RTEMS_INLINE_ROUTINE void *_Addresses_Add_offset (
const void *base,
uintptr_t offset
)
{
return (void *)((uintptr_t)base + offset);
7c08: e08a3109 add r3, sl, r9, lsl #2
7c0c: e08a9189 add r9, sl, r9, lsl #3
7c10: 3a000058 bcc 7d78 <_Objects_Extend_information+0x258>
} else {
/*
* Deal with the special case of the 0 to minimum_index
*/
for ( index = 0; index < minimum_index; index++ ) {
7c14: e3570000 cmp r7, #0
7c18: 13a02000 movne r2, #0
7c1c: 11a01009 movne r1, r9
local_table[ index ] = NULL;
7c20: 11a00002 movne r0, r2
} else {
/*
* Deal with the special case of the 0 to minimum_index
*/
for ( index = 0; index < minimum_index; index++ ) {
7c24: 0a000003 beq 7c38 <_Objects_Extend_information+0x118>
7c28: e2822001 add r2, r2, #1
7c2c: e1570002 cmp r7, r2
local_table[ index ] = NULL;
7c30: e4810004 str r0, [r1], #4
} else {
/*
* Deal with the special case of the 0 to minimum_index
*/
for ( index = 0; index < minimum_index; index++ ) {
7c34: 8afffffb bhi 7c28 <_Objects_Extend_information+0x108>
7c38: e1a0b10b lsl fp, fp, #2
*/
object_blocks[block_count] = NULL;
inactive_per_block[block_count] = 0;
for ( index=index_base ;
index < ( information->allocation_size + index_base );
7c3c: e1d501b4 ldrh r0, [r5, #20]
7c40: e0860000 add r0, r6, r0
}
/*
* Initialise the new entries in the table.
*/
object_blocks[block_count] = NULL;
7c44: e3a0c000 mov ip, #0
inactive_per_block[block_count] = 0;
for ( index=index_base ;
7c48: e1560000 cmp r6, r0
}
/*
* Initialise the new entries in the table.
*/
object_blocks[block_count] = NULL;
7c4c: e78ac00b str ip, [sl, fp]
inactive_per_block[block_count] = 0;
7c50: e783c00b str ip, [r3, fp]
for ( index=index_base ;
7c54: 2a000005 bcs 7c70 <_Objects_Extend_information+0x150>
7c58: e0891106 add r1, r9, r6, lsl #2
* information - object information table
*
* Output parameters: NONE
*/
void _Objects_Extend_information(
7c5c: e1a02006 mov r2, r6
object_blocks[block_count] = NULL;
inactive_per_block[block_count] = 0;
for ( index=index_base ;
index < ( information->allocation_size + index_base );
index++ ) {
7c60: e2822001 add r2, r2, #1
* Initialise the new entries in the table.
*/
object_blocks[block_count] = NULL;
inactive_per_block[block_count] = 0;
for ( index=index_base ;
7c64: e1500002 cmp r0, r2
index < ( information->allocation_size + index_base );
index++ ) {
local_table[ index ] = NULL;
7c68: e481c004 str ip, [r1], #4
* Initialise the new entries in the table.
*/
object_blocks[block_count] = NULL;
inactive_per_block[block_count] = 0;
for ( index=index_base ;
7c6c: 8afffffb bhi 7c60 <_Objects_Extend_information+0x140>
7c70: e10f2000 mrs r2, CPSR
7c74: e3821080 orr r1, r2, #128 ; 0x80
7c78: e129f001 msr CPSR_fc, r1
uint32_t the_class,
uint32_t node,
uint32_t index
)
{
return (( (Objects_Id) the_api ) << OBJECTS_API_START_BIT) |
7c7c: e5951000 ldr r1, [r5]
information->object_blocks = object_blocks;
information->inactive_per_block = inactive_per_block;
information->local_table = local_table;
information->maximum = (Objects_Maximum) maximum;
information->maximum_id = _Objects_Build_id(
7c80: e1d500b4 ldrh r0, [r5, #4]
7c84: e1a01c01 lsl r1, r1, #24
old_tables = information->object_blocks;
information->object_blocks = object_blocks;
information->inactive_per_block = inactive_per_block;
information->local_table = local_table;
information->maximum = (Objects_Maximum) maximum;
7c88: e1a08808 lsl r8, r8, #16
7c8c: e3811801 orr r1, r1, #65536 ; 0x10000
7c90: e1a08828 lsr r8, r8, #16
(( (Objects_Id) the_class ) << OBJECTS_CLASS_START_BIT) |
7c94: e1811d80 orr r1, r1, r0, lsl #27
uint32_t the_class,
uint32_t node,
uint32_t index
)
{
return (( (Objects_Id) the_api ) << OBJECTS_API_START_BIT) |
7c98: e1811008 orr r1, r1, r8
local_table[ index ] = NULL;
}
_ISR_Disable( level );
old_tables = information->object_blocks;
7c9c: e5950034 ldr r0, [r5, #52] ; 0x34
information->object_blocks = object_blocks;
information->inactive_per_block = inactive_per_block;
7ca0: e5853030 str r3, [r5, #48] ; 0x30
_ISR_Disable( level );
old_tables = information->object_blocks;
information->object_blocks = object_blocks;
7ca4: e585a034 str sl, [r5, #52] ; 0x34
information->inactive_per_block = inactive_per_block;
information->local_table = local_table;
7ca8: e585901c str r9, [r5, #28]
information->maximum = (Objects_Maximum) maximum;
7cac: e1c581b0 strh r8, [r5, #16]
information->maximum_id = _Objects_Build_id(
7cb0: e585100c str r1, [r5, #12]
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
7cb4: e129f002 msr CPSR_fc, r2
information->maximum
);
_ISR_Enable( level );
_Workspace_Free( old_tables );
7cb8: eb000872 bl 9e88 <_Workspace_Free>
}
/*
* Assign the new object block to the object block table.
*/
information->object_blocks[ block ] = new_object_block;
7cbc: e5953034 ldr r3, [r5, #52] ; 0x34
7cc0: e59d2004 ldr r2, [sp, #4]
7cc4: e7832104 str r2, [r3, r4, lsl #2]
/*
* Initialize objects .. add to a local chain first.
*/
_Chain_Initialize(
7cc8: e5953034 ldr r3, [r5, #52] ; 0x34
7ccc: e28d0008 add r0, sp, #8
7cd0: e7931104 ldr r1, [r3, r4, lsl #2]
7cd4: e1d521b4 ldrh r2, [r5, #20]
7cd8: e5953018 ldr r3, [r5, #24]
}
/*
* Assign the new object block to the object block table.
*/
information->object_blocks[ block ] = new_object_block;
7cdc: e1a04104 lsl r4, r4, #2
/*
* Initialize objects .. add to a local chain first.
*/
_Chain_Initialize(
7ce0: ebfffcf2 bl 70b0 <_Chain_Initialize>
information->the_class,
_Objects_Local_node,
index
);
_Chain_Append( &information->Inactive, &the_object->Node );
7ce4: e2857020 add r7, r5, #32
/*
* Move from the local chain, initialise, then append to the inactive chain
*/
index = index_base;
while ((the_object = (Objects_Control *) _Chain_Get( &Inactive )) != NULL ) {
7ce8: ea000008 b 7d10 <_Objects_Extend_information+0x1f0>
7cec: e5952000 ldr r2, [r5]
the_object->id = _Objects_Build_id(
7cf0: e1d5c0b4 ldrh ip, [r5, #4]
7cf4: e1a02c02 lsl r2, r2, #24
7cf8: e3822801 orr r2, r2, #65536 ; 0x10000
(( (Objects_Id) the_class ) << OBJECTS_CLASS_START_BIT) |
7cfc: e1822d8c orr r2, r2, ip, lsl #27
uint32_t the_class,
uint32_t node,
uint32_t index
)
{
return (( (Objects_Id) the_api ) << OBJECTS_API_START_BIT) |
7d00: e1822006 orr r2, r2, r6
7d04: e5832008 str r2, [r3, #8]
information->the_class,
_Objects_Local_node,
index
);
_Chain_Append( &information->Inactive, &the_object->Node );
7d08: ebfffcd0 bl 7050 <_Chain_Append>
index++;
7d0c: e2866001 add r6, r6, #1
/*
* Move from the local chain, initialise, then append to the inactive chain
*/
index = index_base;
while ((the_object = (Objects_Control *) _Chain_Get( &Inactive )) != NULL ) {
7d10: e28d0008 add r0, sp, #8
7d14: ebfffcd8 bl 707c <_Chain_Get>
7d18: e2503000 subs r3, r0, #0
information->the_class,
_Objects_Local_node,
index
);
_Chain_Append( &information->Inactive, &the_object->Node );
7d1c: e1a01003 mov r1, r3
7d20: e1a00007 mov r0, r7
/*
* Move from the local chain, initialise, then append to the inactive chain
*/
index = index_base;
while ((the_object = (Objects_Control *) _Chain_Get( &Inactive )) != NULL ) {
7d24: 1afffff0 bne 7cec <_Objects_Extend_information+0x1cc>
index++;
}
information->inactive_per_block[ block ] = information->allocation_size;
information->inactive =
(Objects_Maximum)(information->inactive + information->allocation_size);
7d28: e1d522bc ldrh r2, [r5, #44] ; 0x2c
_Chain_Append( &information->Inactive, &the_object->Node );
index++;
}
information->inactive_per_block[ block ] = information->allocation_size;
7d2c: e1d531b4 ldrh r3, [r5, #20]
7d30: e5951030 ldr r1, [r5, #48] ; 0x30
information->inactive =
(Objects_Maximum)(information->inactive + information->allocation_size);
7d34: e0832002 add r2, r3, r2
_Chain_Append( &information->Inactive, &the_object->Node );
index++;
}
information->inactive_per_block[ block ] = information->allocation_size;
7d38: e7813004 str r3, [r1, r4]
information->inactive =
7d3c: e1c522bc strh r2, [r5, #44] ; 0x2c
(Objects_Maximum)(information->inactive + information->allocation_size);
}
7d40: e28dd014 add sp, sp, #20
7d44: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc}
* Allocate the name table, and the objects and if it fails either return or
* generate a fatal error depending on auto-extending being active.
*/
block_size = information->allocation_size * information->size;
if ( information->auto_extend ) {
new_object_block = _Workspace_Allocate( block_size );
7d48: eb000848 bl 9e70 <_Workspace_Allocate>
if ( !new_object_block )
7d4c: e3500000 cmp r0, #0
7d50: e58d0004 str r0, [sp, #4]
7d54: 1affff9c bne 7bcc <_Objects_Extend_information+0xac>
7d58: eafffff8 b 7d40 <_Objects_Extend_information+0x220>
if ( !object_blocks ) {
_Workspace_Free( new_object_block );
return;
}
} else {
object_blocks = _Workspace_Allocate_or_fatal_error( block_size );
7d5c: eb00084d bl 9e98 <_Workspace_Allocate_or_fatal_error>
* Take the block count down. Saves all the (block_count - 1)
* in the copies.
*/
block_count--;
if ( information->maximum > minimum_index ) {
7d60: e1d531b0 ldrh r3, [r5, #16]
if ( !object_blocks ) {
_Workspace_Free( new_object_block );
return;
}
} else {
object_blocks = _Workspace_Allocate_or_fatal_error( block_size );
7d64: e1a0a000 mov sl, r0
* Take the block count down. Saves all the (block_count - 1)
* in the copies.
*/
block_count--;
if ( information->maximum > minimum_index ) {
7d68: e1570003 cmp r7, r3
7d6c: e08a3109 add r3, sl, r9, lsl #2
7d70: e08a9189 add r9, sl, r9, lsl #3
7d74: 2affffa6 bcs 7c14 <_Objects_Extend_information+0xf4>
/*
* Copy each section of the table over. This has to be performed as
* separate parts as size of each block has changed.
*/
memcpy( object_blocks,
7d78: e1a0b10b lsl fp, fp, #2
7d7c: e5951034 ldr r1, [r5, #52] ; 0x34
7d80: e1a0200b mov r2, fp
7d84: e1a0000a mov r0, sl
7d88: e58d3000 str r3, [sp]
7d8c: eb0018cf bl e0d0 <memcpy>
information->object_blocks,
block_count * sizeof(void*) );
memcpy( inactive_per_block,
7d90: e59d3000 ldr r3, [sp]
7d94: e5951030 ldr r1, [r5, #48] ; 0x30
7d98: e1a00003 mov r0, r3
7d9c: e1a0200b mov r2, fp
7da0: eb0018ca bl e0d0 <memcpy>
information->inactive_per_block,
block_count * sizeof(uint32_t) );
memcpy( local_table,
information->local_table,
(information->maximum + minimum_index) * sizeof(Objects_Control *) );
7da4: e1d521b0 ldrh r2, [r5, #16]
7da8: e0872002 add r2, r7, r2
information->object_blocks,
block_count * sizeof(void*) );
memcpy( inactive_per_block,
information->inactive_per_block,
block_count * sizeof(uint32_t) );
memcpy( local_table,
7dac: e1a00009 mov r0, r9
7db0: e595101c ldr r1, [r5, #28]
7db4: e1a02102 lsl r2, r2, #2
7db8: eb0018c4 bl e0d0 <memcpy>
7dbc: e59d3000 ldr r3, [sp]
7dc0: eaffff9d b 7c3c <_Objects_Extend_information+0x11c>
minimum_index = _Objects_Get_index( information->minimum_id );
index_base = minimum_index;
block = 0;
/* if ( information->maximum < minimum_index ) */
if ( information->object_blocks == NULL )
7dc4: e1d021b4 ldrh r2, [r0, #20]
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
minimum_index = _Objects_Get_index( information->minimum_id );
7dc8: e1a06007 mov r6, r7
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
7dcc: e3a09001 mov r9, #1
index_base = minimum_index;
block = 0;
/* if ( information->maximum < minimum_index ) */
if ( information->object_blocks == NULL )
block_count = 0;
7dd0: e1a0b004 mov fp, r4
7dd4: eaffff72 b 7ba4 <_Objects_Extend_information+0x84>
(sizeof(void *) + sizeof(uint32_t) + sizeof(Objects_Name *)) +
((maximum + minimum_index) * sizeof(Objects_Control *));
if ( information->auto_extend ) {
object_blocks = _Workspace_Allocate( block_size );
if ( !object_blocks ) {
_Workspace_Free( new_object_block );
7dd8: e59d0004 ldr r0, [sp, #4]
7ddc: eb000829 bl 9e88 <_Workspace_Free>
return;
7de0: eaffffd6 b 7d40 <_Objects_Extend_information+0x220>
if ( information->object_blocks == NULL )
block_count = 0;
else {
block_count = information->maximum / information->allocation_size;
for ( ; block < block_count; block++ ) {
7de4: e1a0200a mov r2, sl <== NOT EXECUTED
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
minimum_index = _Objects_Get_index( information->minimum_id );
7de8: e1a06007 mov r6, r7 <== NOT EXECUTED
/*
* Search for a free block of indexes. If we do NOT need to allocate or
* extend the block table, then we will change do_extend.
*/
do_extend = true;
7dec: e3a09001 mov r9, #1 <== NOT EXECUTED
minimum_index = _Objects_Get_index( information->minimum_id );
index_base = minimum_index;
block = 0;
7df0: e1a0400b mov r4, fp <== NOT EXECUTED
7df4: eaffff6a b 7ba4 <_Objects_Extend_information+0x84> <== NOT EXECUTED
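The listing above interleaves the allocation, copy and swap steps of _Objects_Extend_information with the branches the compiler emitted, which makes their order hard to follow. Below is a simplified, self-contained C sketch of that pattern, not the RTEMS source itself: the three index tables are carved out of one workspace allocation, the old contents are copied over, the new slots are cleared, and the pointers are published with interrupts disabled before the old allocation is freed. The helper parameters ws_alloc, ws_free, disable_interrupts and enable_interrupts are placeholders for _Workspace_Allocate, _Workspace_Free, _ISR_Disable and _ISR_Enable, and the struct fields are illustrative.

#include <stddef.h>
#include <string.h>

struct obj_info {
  void   **object_blocks;      /* one entry per block of objects     */
  size_t  *inactive_per_block; /* inactive count per block           */
  void   **local_table;        /* one entry per object index         */
  size_t   block_count;
  size_t   index_count;
};

void extend_tables(struct obj_info *info, size_t new_blocks, size_t new_indexes,
                   void *(*ws_alloc)(size_t), void (*ws_free)(void *),
                   unsigned (*disable_interrupts)(void),
                   void (*enable_interrupts)(unsigned))
{
  /* One allocation holds object_blocks, inactive_per_block and local_table. */
  size_t bytes = new_blocks * (sizeof(void *) + sizeof(size_t))
               + new_indexes * sizeof(void *);
  void **object_blocks = ws_alloc(bytes);
  if (object_blocks == NULL)
    return;

  size_t *inactive_per_block = (size_t *)(object_blocks + new_blocks);
  void  **local_table        = (void **)(inactive_per_block + new_blocks);

  /* Copy each old table separately (their entry sizes differ), then clear
     the newly added slots so they read as "no block / no object" yet.    */
  if (info->block_count > 0) {
    memcpy(object_blocks, info->object_blocks,
           info->block_count * sizeof(void *));
    memcpy(inactive_per_block, info->inactive_per_block,
           info->block_count * sizeof(size_t));
    memcpy(local_table, info->local_table,
           info->index_count * sizeof(void *));
  }
  for (size_t b = info->block_count; b < new_blocks; ++b) {
    object_blocks[b]      = NULL;
    inactive_per_block[b] = 0;
  }
  for (size_t i = info->index_count; i < new_indexes; ++i)
    local_table[i] = NULL;

  /* Publish all three tables in one interrupt-disabled window, then
     release the previous combined allocation.                           */
  unsigned level      = disable_interrupts();
  void    *old_tables = info->object_blocks;
  info->object_blocks      = object_blocks;
  info->inactive_per_block = inactive_per_block;
  info->local_table        = local_table;
  info->block_count        = new_blocks;
  info->index_count        = new_indexes;
  enable_interrupts(level);

  ws_free(old_tables);
}

Publishing all three pointers inside the same interrupt-disabled window is what keeps an ISR or allocator that runs concurrently from ever seeing a half-updated set of tables.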
0000812c <_Objects_Shrink_information>:
#include <rtems/score/isr.h>
void _Objects_Shrink_information(
Objects_Information *information
)
{
812c: e92d40f0 push {r4, r5, r6, r7, lr}
/*
* Search the list to find block or chunk with all objects inactive.
*/
index_base = _Objects_Get_index( information->minimum_id );
8130: e1d040b8 ldrh r4, [r0, #8]
block_count = (information->maximum - index_base) /
8134: e1d051b4 ldrh r5, [r0, #20]
#include <rtems/score/isr.h>
void _Objects_Shrink_information(
Objects_Information *information
)
{
8138: e1a06000 mov r6, r0
/*
* Search the list to find block or chunk with all objects inactive.
*/
index_base = _Objects_Get_index( information->minimum_id );
block_count = (information->maximum - index_base) /
813c: e1d001b0 ldrh r0, [r0, #16]
8140: e1a01005 mov r1, r5
8144: e0640000 rsb r0, r4, r0
8148: eb003997 bl 167ac <__aeabi_uidiv>
information->allocation_size;
for ( block = 0; block < block_count; block++ ) {
814c: e3500000 cmp r0, #0
8150: 08bd80f0 popeq {r4, r5, r6, r7, pc}
if ( information->inactive_per_block[ block ] ==
8154: e5962030 ldr r2, [r6, #48] ; 0x30
8158: e5923000 ldr r3, [r2]
815c: e1550003 cmp r5, r3
index_base = _Objects_Get_index( information->minimum_id );
block_count = (information->maximum - index_base) /
information->allocation_size;
for ( block = 0; block < block_count; block++ ) {
8160: 13a03000 movne r3, #0
if ( information->inactive_per_block[ block ] ==
8164: 1a000005 bne 8180 <_Objects_Shrink_information+0x54>
8168: ea000008 b 8190 <_Objects_Shrink_information+0x64> <== NOT EXECUTED
816c: e5b21004 ldr r1, [r2, #4]!
8170: e1550001 cmp r5, r1
information->inactive -= information->allocation_size;
return;
}
index_base += information->allocation_size;
8174: e0844005 add r4, r4, r5
index_base = _Objects_Get_index( information->minimum_id );
block_count = (information->maximum - index_base) /
information->allocation_size;
for ( block = 0; block < block_count; block++ ) {
if ( information->inactive_per_block[ block ] ==
8178: e1a07103 lsl r7, r3, #2
817c: 0a000004 beq 8194 <_Objects_Shrink_information+0x68>
index_base = _Objects_Get_index( information->minimum_id );
block_count = (information->maximum - index_base) /
information->allocation_size;
for ( block = 0; block < block_count; block++ ) {
8180: e2833001 add r3, r3, #1
8184: e1530000 cmp r3, r0
8188: 1afffff7 bne 816c <_Objects_Shrink_information+0x40>
818c: e8bd80f0 pop {r4, r5, r6, r7, pc}
if ( information->inactive_per_block[ block ] ==
8190: e3a07000 mov r7, #0 <== NOT EXECUTED
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First(
Chain_Control *the_chain
)
{
return _Chain_Head( the_chain )->next;
8194: e5960020 ldr r0, [r6, #32]
8198: ea000002 b 81a8 <_Objects_Shrink_information+0x7c>
if ((index >= index_base) &&
(index < (index_base + information->allocation_size))) {
_Chain_Extract( &extract_me->Node );
}
}
while ( the_object );
819c: e3550000 cmp r5, #0
81a0: 0a00000b beq 81d4 <_Objects_Shrink_information+0xa8>
index = _Objects_Get_index( the_object->id );
/*
* Get the next node before the node is extracted
*/
extract_me = the_object;
the_object = (Objects_Control *) the_object->Node.next;
81a4: e1a00005 mov r0, r5
* Assume the Inactive chain is never empty at this point
*/
the_object = (Objects_Control *) _Chain_First( &information->Inactive );
do {
index = _Objects_Get_index( the_object->id );
81a8: e1d030b8 ldrh r3, [r0, #8]
/*
* Get the next node before the node is extracted
*/
extract_me = the_object;
the_object = (Objects_Control *) the_object->Node.next;
if ((index >= index_base) &&
81ac: e1530004 cmp r3, r4
index = _Objects_Get_index( the_object->id );
/*
* Get the next node before the node is extracted
*/
extract_me = the_object;
the_object = (Objects_Control *) the_object->Node.next;
81b0: e5905000 ldr r5, [r0]
if ((index >= index_base) &&
81b4: 3afffff8 bcc 819c <_Objects_Shrink_information+0x70>
(index < (index_base + information->allocation_size))) {
81b8: e1d621b4 ldrh r2, [r6, #20]
81bc: e0842002 add r2, r4, r2
/*
* Get the next node before the node is extracted
*/
extract_me = the_object;
the_object = (Objects_Control *) the_object->Node.next;
if ((index >= index_base) &&
81c0: e1530002 cmp r3, r2
81c4: 2afffff4 bcs 819c <_Objects_Shrink_information+0x70>
(index < (index_base + information->allocation_size))) {
_Chain_Extract( &extract_me->Node );
81c8: eb000de3 bl b95c <_Chain_Extract>
}
}
while ( the_object );
81cc: e3550000 cmp r5, #0
81d0: 1afffff3 bne 81a4 <_Objects_Shrink_information+0x78>
/*
* Free the memory and reset the structures in the object's information
*/
_Workspace_Free( information->object_blocks[ block ] );
81d4: e5963034 ldr r3, [r6, #52] ; 0x34
81d8: e7930007 ldr r0, [r3, r7]
81dc: eb000729 bl 9e88 <_Workspace_Free>
information->object_blocks[ block ] = NULL;
81e0: e5963034 ldr r3, [r6, #52] ; 0x34
81e4: e7835007 str r5, [r3, r7]
information->inactive_per_block[ block ] = 0;
information->inactive -= information->allocation_size;
81e8: e1d612bc ldrh r1, [r6, #44] ; 0x2c
81ec: e1d631b4 ldrh r3, [r6, #20]
* Free the memory and reset the structures in the object's information
*/
_Workspace_Free( information->object_blocks[ block ] );
information->object_blocks[ block ] = NULL;
information->inactive_per_block[ block ] = 0;
81f0: e5962030 ldr r2, [r6, #48] ; 0x30
information->inactive -= information->allocation_size;
81f4: e0633001 rsb r3, r3, r1
* Free the memory and reset the structures in the object's information
*/
_Workspace_Free( information->object_blocks[ block ] );
information->object_blocks[ block ] = NULL;
information->inactive_per_block[ block ] = 0;
81f8: e7825007 str r5, [r2, r7]
information->inactive -= information->allocation_size;
81fc: e1c632bc strh r3, [r6, #44] ; 0x2c
return;
8200: e8bd80f0 pop {r4, r5, r6, r7, pc}
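_Objects_Shrink_information reads more clearly as straight C. The sketch below condenses the logic traced above under simplified structures; extract_node(), free_block() and the field names are illustrative stand-ins rather than the RTEMS API, and the object index is stored directly on the node instead of being recovered from the object id as the real code does.

#include <stddef.h>
#include <stdint.h>

struct node { struct node *next; struct node *prev; uint32_t index; };

struct info {
  uint32_t    minimum_index;
  uint32_t    maximum;            /* highest index currently provided       */
  uint32_t    allocation_size;    /* objects per block                      */
  uint32_t    inactive;           /* total unused objects                   */
  uint32_t   *inactive_per_block;
  void      **object_blocks;
  struct node inactive_head;      /* sentinel of the circular Inactive list */
};

void shrink(struct info *inf,
            void (*extract_node)(struct node *),
            void (*free_block)(void *))
{
  uint32_t base  = inf->minimum_index;
  uint32_t count = (inf->maximum - base) / inf->allocation_size;

  for (uint32_t block = 0; block < count; ++block, base += inf->allocation_size) {
    /* Only a block whose objects are all unused can be released. */
    if (inf->inactive_per_block[block] != inf->allocation_size)
      continue;

    /* Pull every unused object in this index range off the Inactive list,
       fetching the successor before the node is extracted.               */
    struct node *n = inf->inactive_head.next;
    while (n != &inf->inactive_head) {
      struct node *next = n->next;
      if (n->index >= base && n->index < base + inf->allocation_size)
        extract_node(n);
      n = next;
    }

    free_block(inf->object_blocks[block]);
    inf->object_blocks[block]      = NULL;
    inf->inactive_per_block[block] = 0;
    inf->inactive                 -= inf->allocation_size;
    return;                         /* at most one block is released per call */
  }
}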
00008ef8 <_RBTree_Extract_validate_unprotected>:
)
{
RBTree_Node *parent, *sibling;
RBTree_Direction dir;
parent = the_node->parent;
8ef8: e5903000 ldr r3, [r0]
if(!parent->parent) return;
8efc: e5932000 ldr r2, [r3]
8f00: e3520000 cmp r2, #0
* of the extract operation.
*/
static void _RBTree_Extract_validate_unprotected(
RBTree_Node *the_node
)
{
8f04: e92d07f0 push {r4, r5, r6, r7, r8, r9, sl}
RBTree_Node *parent, *sibling;
RBTree_Direction dir;
parent = the_node->parent;
if(!parent->parent) return;
8f08: 0a00002f beq 8fcc <_RBTree_Extract_validate_unprotected+0xd4>
{
if(!the_node) return NULL;
if(!(the_node->parent)) return NULL;
if(!(the_node->parent->parent)) return NULL;
if(the_node == the_node->parent->child[RBT_LEFT])
8f0c: e5932004 ldr r2, [r3, #4]
8f10: e1500002 cmp r0, r2
return the_node->parent->child[RBT_RIGHT];
8f14: 05932008 ldreq r2, [r3, #8]
* Now the_node has a black sibling and red parent. After rotation,
* update sibling pointer.
*/
if (_RBTree_Is_red(sibling)) {
parent->color = RBT_RED;
sibling->color = RBT_BLACK;
8f18: e3a06000 mov r6, #0
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
8f1c: e3a05001 mov r5, #1
8f20: ea000022 b 8fb0 <_RBTree_Extract_validate_unprotected+0xb8>
if(!parent->parent) return;
sibling = _RBTree_Sibling(the_node);
/* continue to correct tree as long as the_node is black and not the root */
while (!_RBTree_Is_red(the_node) && parent->parent) {
8f24: e5931000 ldr r1, [r3]
8f28: e3510000 cmp r1, #0
8f2c: 0a000022 beq 8fbc <_RBTree_Extract_validate_unprotected+0xc4>
*/
RTEMS_INLINE_ROUTINE bool _RBTree_Is_red(
const RBTree_Node *the_node
)
{
return (the_node && the_node->color == RBT_RED);
8f30: e3520000 cmp r2, #0
8f34: 0a000002 beq 8f44 <_RBTree_Extract_validate_unprotected+0x4c>
8f38: e592c00c ldr ip, [r2, #12]
8f3c: e35c0001 cmp ip, #1
8f40: 0a000023 beq 8fd4 <_RBTree_Extract_validate_unprotected+0xdc>
_RBTree_Rotate(parent, dir);
sibling = parent->child[_RBTree_Opposite_direction(dir)];
}
/* sibling is black, see if both of its children are also black. */
if (!_RBTree_Is_red(sibling->child[RBT_RIGHT]) &&
8f44: e5921008 ldr r1, [r2, #8]
8f48: e3510000 cmp r1, #0
8f4c: 0a000002 beq 8f5c <_RBTree_Extract_validate_unprotected+0x64>
8f50: e591c00c ldr ip, [r1, #12]
8f54: e35c0001 cmp ip, #1
8f58: 0a000042 beq 9068 <_RBTree_Extract_validate_unprotected+0x170>
!_RBTree_Is_red(sibling->child[RBT_LEFT])) {
8f5c: e592c004 ldr ip, [r2, #4]
8f60: e35c0000 cmp ip, #0
8f64: 0a000002 beq 8f74 <_RBTree_Extract_validate_unprotected+0x7c>
8f68: e59cc00c ldr ip, [ip, #12]
8f6c: e35c0001 cmp ip, #1
8f70: 0a00003c beq 9068 <_RBTree_Extract_validate_unprotected+0x170>
sibling->color = RBT_RED;
8f74: e582500c str r5, [r2, #12]
8f78: e593200c ldr r2, [r3, #12]
8f7c: e3520001 cmp r2, #1
8f80: 0a000033 beq 9054 <_RBTree_Extract_validate_unprotected+0x15c>
if (_RBTree_Is_red(parent)) {
parent->color = RBT_BLACK;
break;
}
the_node = parent; /* done if parent is red */
parent = the_node->parent;
8f84: e5931000 ldr r1, [r3]
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Sibling(
const RBTree_Node *the_node
)
{
if(!the_node) return NULL;
if(!(the_node->parent)) return NULL;
8f88: e3510000 cmp r1, #0
8f8c: 0a000033 beq 9060 <_RBTree_Extract_validate_unprotected+0x168>
if(!(the_node->parent->parent)) return NULL;
8f90: e5912000 ldr r2, [r1]
8f94: e3520000 cmp r2, #0
8f98: 0a000002 beq 8fa8 <_RBTree_Extract_validate_unprotected+0xb0>
if(the_node == the_node->parent->child[RBT_LEFT])
8f9c: e5912004 ldr r2, [r1, #4]
8fa0: e1530002 cmp r3, r2
return the_node->parent->child[RBT_RIGHT];
8fa4: 05912008 ldreq r2, [r1, #8]
c->child[dir] = the_node;
the_node->parent->child[the_node != the_node->parent->child[0]] = c;
c->parent = the_node->parent;
the_node->parent = c;
8fa8: e1a00003 mov r0, r3
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Sibling(
const RBTree_Node *the_node
)
{
if(!the_node) return NULL;
if(!(the_node->parent)) return NULL;
8fac: e1a03001 mov r3, r1
*/
RTEMS_INLINE_ROUTINE bool _RBTree_Is_red(
const RBTree_Node *the_node
)
{
return (the_node && the_node->color == RBT_RED);
8fb0: e590100c ldr r1, [r0, #12]
8fb4: e3510001 cmp r1, #1
8fb8: 1affffd9 bne 8f24 <_RBTree_Extract_validate_unprotected+0x2c>
sibling->child[_RBTree_Opposite_direction(dir)]->color = RBT_BLACK;
_RBTree_Rotate(parent, dir);
break; /* done */
}
} /* while */
if(!the_node->parent->parent) the_node->color = RBT_BLACK;
8fbc: e5903000 ldr r3, [r0]
8fc0: e5933000 ldr r3, [r3]
8fc4: e3530000 cmp r3, #0
8fc8: 0580300c streq r3, [r0, #12]
}
8fcc: e8bd07f0 pop {r4, r5, r6, r7, r8, r9, sl}
8fd0: e12fff1e bx lr
* update sibling pointer.
*/
if (_RBTree_Is_red(sibling)) {
parent->color = RBT_RED;
sibling->color = RBT_BLACK;
dir = the_node != parent->child[0];
8fd4: e5934004 ldr r4, [r3, #4]
8fd8: e054a000 subs sl, r4, r0
8fdc: 13a0a001 movne sl, #1
* This function maintains the properties of the red-black tree.
*
* @note It does NOT disable interrupts to ensure the atomicity
* of the extract operation.
*/
static void _RBTree_Extract_validate_unprotected(
8fe0: e22a7001 eor r7, sl, #1
RBTree_Direction dir
)
{
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
8fe4: e2878001 add r8, r7, #1
8fe8: e7939108 ldr r9, [r3, r8, lsl #2]
8fec: e3590000 cmp r9, #0
* then rotate parent left, making the sibling be the_node's grandparent.
* Now the_node has a black sibling and red parent. After rotation,
* update sibling pointer.
*/
if (_RBTree_Is_red(sibling)) {
parent->color = RBT_RED;
8ff0: e583c00c str ip, [r3, #12]
sibling->color = RBT_BLACK;
8ff4: e582600c str r6, [r2, #12]
8ff8: 01a02009 moveq r2, r9
8ffc: 0affffd0 beq 8f44 <_RBTree_Extract_validate_unprotected+0x4c>
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
9000: e3570000 cmp r7, #0
{
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
c = the_node->child[_RBTree_Opposite_direction(dir)];
9004: 15934008 ldrne r4, [r3, #8]
the_node->child[_RBTree_Opposite_direction(dir)] = c->child[dir];
9008: e28a2001 add r2, sl, #1
900c: e794a102 ldr sl, [r4, r2, lsl #2]
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
9010: 01a0c007 moveq ip, r7
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
c = the_node->child[_RBTree_Opposite_direction(dir)];
the_node->child[_RBTree_Opposite_direction(dir)] = c->child[dir];
9014: e28cc001 add ip, ip, #1
9018: e783a10c str sl, [r3, ip, lsl #2]
if (c->child[dir])
901c: e794c102 ldr ip, [r4, r2, lsl #2]
9020: e35c0000 cmp ip, #0
c->child[dir]->parent = the_node;
9024: 158c3000 strne r3, [ip]
c->child[dir] = the_node;
9028: e7843102 str r3, [r4, r2, lsl #2]
902c: 15931000 ldrne r1, [r3]
the_node->parent->child[the_node != the_node->parent->child[0]] = c;
9030: e5912004 ldr r2, [r1, #4]
9034: e1530002 cmp r3, r2
9038: 13a02008 movne r2, #8
903c: 03a02004 moveq r2, #4
9040: e7824001 str r4, [r2, r1]
c->parent = the_node->parent;
9044: e5841000 str r1, [r4]
the_node->parent = c;
9048: e7932108 ldr r2, [r3, r8, lsl #2]
904c: e5834000 str r4, [r3]
9050: eaffffbb b 8f44 <_RBTree_Extract_validate_unprotected+0x4c>
/* sibling is black, see if both of its children are also black. */
if (!_RBTree_Is_red(sibling->child[RBT_RIGHT]) &&
!_RBTree_Is_red(sibling->child[RBT_LEFT])) {
sibling->color = RBT_RED;
if (_RBTree_Is_red(parent)) {
parent->color = RBT_BLACK;
9054: e3a02000 mov r2, #0
9058: e583200c str r2, [r3, #12]
break;
905c: eaffffd6 b 8fbc <_RBTree_Extract_validate_unprotected+0xc4>
RTEMS_INLINE_ROUTINE RBTree_Node *_RBTree_Sibling(
const RBTree_Node *the_node
)
{
if(!the_node) return NULL;
if(!(the_node->parent)) return NULL;
9060: e1a02001 mov r2, r1 <== NOT EXECUTED
9064: eaffffcf b 8fa8 <_RBTree_Extract_validate_unprotected+0xb0> <== NOT EXECUTED
* cases, either the_node is to the left or the right of the parent.
* In both cases, first check if one of sibling's children is black,
* and if so rotate in the proper direction and update sibling pointer.
* Then switch the sibling and parent colors, and rotate through parent.
*/
dir = the_node != parent->child[0];
9068: e5936004 ldr r6, [r3, #4]
906c: e0566000 subs r6, r6, r0
9070: 13a06001 movne r6, #1
* This function maintains the properties of the red-black tree.
*
* @note It does NOT disable interrupts to ensure the atomicity
* of the extract operation.
*/
static void _RBTree_Extract_validate_unprotected(
9074: e2265001 eor r5, r6, #1
* In both cases, first check if one of sibling's children is black,
* and if so rotate in the proper direction and update sibling pointer.
* Then switch the sibling and parent colors, and rotate through parent.
*/
dir = the_node != parent->child[0];
if (!_RBTree_Is_red(sibling->child[_RBTree_Opposite_direction(dir)])) {
9078: e285c001 add ip, r5, #1
907c: e792410c ldr r4, [r2, ip, lsl #2]
*/
RTEMS_INLINE_ROUTINE bool _RBTree_Is_red(
const RBTree_Node *the_node
)
{
return (the_node && the_node->color == RBT_RED);
9080: e3540000 cmp r4, #0
9084: 0a000003 beq 9098 <_RBTree_Extract_validate_unprotected+0x1a0>
9088: e594700c ldr r7, [r4, #12]
908c: e3570001 cmp r7, #1
9090: 0793710c ldreq r7, [r3, ip, lsl #2]
9094: 0a00001f beq 9118 <_RBTree_Extract_validate_unprotected+0x220>
* This function maintains the properties of the red-black tree.
*
* @note It does NOT disable interrupts to ensure the atomicity
* of the extract operation.
*/
static void _RBTree_Extract_validate_unprotected(
9098: e2254001 eor r4, r5, #1
RBTree_Direction dir
)
{
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
909c: e2848001 add r8, r4, #1
90a0: e792a108 ldr sl, [r2, r8, lsl #2]
* Then switch the sibling and parent colors, and rotate through parent.
*/
dir = the_node != parent->child[0];
if (!_RBTree_Is_red(sibling->child[_RBTree_Opposite_direction(dir)])) {
sibling->color = RBT_RED;
sibling->child[dir]->color = RBT_BLACK;
90a4: e2867001 add r7, r6, #1
90a8: e7928107 ldr r8, [r2, r7, lsl #2]
90ac: e35a0000 cmp sl, #0
* and if so rotate in the proper direction and update sibling pointer.
* Then switch the sibling and parent colors, and rotate through parent.
*/
dir = the_node != parent->child[0];
if (!_RBTree_Is_red(sibling->child[_RBTree_Opposite_direction(dir)])) {
sibling->color = RBT_RED;
90b0: e3a07001 mov r7, #1
sibling->child[dir]->color = RBT_BLACK;
90b4: e3a0a000 mov sl, #0
* and if so rotate in the proper direction and update sibling pointer.
* Then switch the sibling and parent colors, and rotate through parent.
*/
dir = the_node != parent->child[0];
if (!_RBTree_Is_red(sibling->child[_RBTree_Opposite_direction(dir)])) {
sibling->color = RBT_RED;
90b8: e582700c str r7, [r2, #12]
sibling->child[dir]->color = RBT_BLACK;
90bc: e588a00c str sl, [r8, #12]
90c0: 0a000011 beq 910c <_RBTree_Extract_validate_unprotected+0x214>
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
90c4: e3540000 cmp r4, #0
{
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
c = the_node->child[_RBTree_Opposite_direction(dir)];
90c8: 05921004 ldreq r1, [r2, #4]
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
90cc: 11a04007 movne r4, r7
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
c = the_node->child[_RBTree_Opposite_direction(dir)];
the_node->child[_RBTree_Opposite_direction(dir)] = c->child[dir];
90d0: e791810c ldr r8, [r1, ip, lsl #2]
90d4: e2844001 add r4, r4, #1
90d8: e7828104 str r8, [r2, r4, lsl #2]
if (c->child[dir])
90dc: e791410c ldr r4, [r1, ip, lsl #2]
90e0: e3540000 cmp r4, #0
c->child[dir]->parent = the_node;
90e4: 15842000 strne r2, [r4]
c->child[dir] = the_node;
90e8: e781210c str r2, [r1, ip, lsl #2]
the_node->parent->child[the_node != the_node->parent->child[0]] = c;
90ec: e5924000 ldr r4, [r2]
90f0: e5947004 ldr r7, [r4, #4]
90f4: e1520007 cmp r2, r7
90f8: 13a07008 movne r7, #8
90fc: 03a07004 moveq r7, #4
c->parent = the_node->parent;
9100: e5814000 str r4, [r1]
if (c->child[dir])
c->child[dir]->parent = the_node;
c->child[dir] = the_node;
the_node->parent->child[the_node != the_node->parent->child[0]] = c;
9104: e7871004 str r1, [r7, r4]
c->parent = the_node->parent;
the_node->parent = c;
9108: e5821000 str r1, [r2]
_RBTree_Rotate(sibling, _RBTree_Opposite_direction(dir));
sibling = parent->child[_RBTree_Opposite_direction(dir)];
910c: e793210c ldr r2, [r3, ip, lsl #2]
9110: e792410c ldr r4, [r2, ip, lsl #2]
9114: e1a07002 mov r7, r2
}
sibling->color = parent->color;
9118: e593c00c ldr ip, [r3, #12]
parent->color = RBT_BLACK;
911c: e3a01000 mov r1, #0
RBTree_Direction dir
)
{
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
9120: e1570001 cmp r7, r1
sibling->color = RBT_RED;
sibling->child[dir]->color = RBT_BLACK;
_RBTree_Rotate(sibling, _RBTree_Opposite_direction(dir));
sibling = parent->child[_RBTree_Opposite_direction(dir)];
}
sibling->color = parent->color;
9124: e582c00c str ip, [r2, #12]
parent->color = RBT_BLACK;
9128: e583100c str r1, [r3, #12]
sibling->child[_RBTree_Opposite_direction(dir)]->color = RBT_BLACK;
912c: e584100c str r1, [r4, #12]
9130: 0affffa1 beq 8fbc <_RBTree_Extract_validate_unprotected+0xc4>
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
9134: e1550001 cmp r5, r1
{
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
c = the_node->child[_RBTree_Opposite_direction(dir)];
9138: 05932004 ldreq r2, [r3, #4]
913c: 15932008 ldrne r2, [r3, #8]
the_node->child[_RBTree_Opposite_direction(dir)] = c->child[dir];
9140: e2866001 add r6, r6, #1
9144: e7921106 ldr r1, [r2, r6, lsl #2]
*/
RTEMS_INLINE_ROUTINE RBTree_Direction _RBTree_Opposite_direction(
RBTree_Direction the_dir
)
{
return (RBTree_Direction) !((int) the_dir);
9148: 13a05001 movne r5, #1
RBTree_Node *c;
if (the_node == NULL) return;
if (the_node->child[_RBTree_Opposite_direction(dir)] == NULL) return;
c = the_node->child[_RBTree_Opposite_direction(dir)];
the_node->child[_RBTree_Opposite_direction(dir)] = c->child[dir];
914c: e2855001 add r5, r5, #1
9150: e7831105 str r1, [r3, r5, lsl #2]
if (c->child[dir])
9154: e7921106 ldr r1, [r2, r6, lsl #2]
9158: e3510000 cmp r1, #0
c->child[dir]->parent = the_node;
915c: 15813000 strne r3, [r1]
c->child[dir] = the_node;
9160: e7823106 str r3, [r2, r6, lsl #2]
the_node->parent->child[the_node != the_node->parent->child[0]] = c;
9164: e5931000 ldr r1, [r3]
9168: e591c004 ldr ip, [r1, #4]
916c: e153000c cmp r3, ip
9170: 13a0c008 movne ip, #8
9174: 03a0c004 moveq ip, #4
c->parent = the_node->parent;
9178: e5821000 str r1, [r2]
if (c->child[dir])
c->child[dir]->parent = the_node;
c->child[dir] = the_node;
the_node->parent->child[the_node != the_node->parent->child[0]] = c;
917c: e78c2001 str r2, [ip, r1]
c->parent = the_node->parent;
the_node->parent = c;
9180: e5832000 str r2, [r3]
9184: eaffff8c b 8fbc <_RBTree_Extract_validate_unprotected+0xc4>
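Most of the body above is the compiler repeatedly inlining _RBTree_Sibling, _RBTree_Rotate and _RBTree_Is_red. Restating those helpers as plain C makes the rebalancing loop easier to read; the sketch follows the node layout visible in the listing (parent at offset 0, child[0]/child[1] at offsets 4 and 8, color at offset 12) but is otherwise illustrative, not the RTEMS headers.

#include <stddef.h>

typedef enum { RBT_LEFT = 0, RBT_RIGHT = 1 } rbt_dir;
typedef enum { RBT_BLACK = 0, RBT_RED = 1 } rbt_color;

struct rbnode {
  struct rbnode *parent;
  struct rbnode *child[2];
  rbt_color      color;
};

/* A node is red only if it exists and carries the RED color. */
static int rbt_is_red(const struct rbnode *n)
{
  return n != NULL && n->color == RBT_RED;
}

/* The sibling is the parent's other child; there is none when the node has
   no parent or when the parent is the dummy root (parent->parent == NULL). */
static struct rbnode *rbt_sibling(const struct rbnode *n)
{
  if (n == NULL || n->parent == NULL || n->parent->parent == NULL)
    return NULL;
  return (n == n->parent->child[RBT_LEFT])
           ? n->parent->child[RBT_RIGHT]
           : n->parent->child[RBT_LEFT];
}

/* Rotate n in direction dir: the opposite-side child c moves up, n moves
   down under c, and c's dir-side subtree is re-attached under n.  As in
   the listing, n is assumed to have a parent (the tree uses a dummy root). */
static void rbt_rotate(struct rbnode *n, rbt_dir dir)
{
  rbt_dir opp = (rbt_dir)!dir;
  struct rbnode *c;

  if (n == NULL || (c = n->child[opp]) == NULL)
    return;

  n->child[opp] = c->child[dir];
  if (c->child[dir] != NULL)
    c->child[dir]->parent = n;
  c->child[dir] = n;
  n->parent->child[n != n->parent->child[RBT_LEFT]] = c;
  c->parent = n->parent;
  n->parent = c;
}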
00009bd8 <_Scheduler_CBS_Cleanup>:
#include <rtems/config.h>
#include <rtems/score/scheduler.h>
#include <rtems/score/schedulercbs.h>
int _Scheduler_CBS_Cleanup (void)
{
9bd8: e92d4070 push {r4, r5, r6, lr}
unsigned int i;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
9bdc: e59f5054 ldr r5, [pc, #84] ; 9c38 <_Scheduler_CBS_Cleanup+0x60>
9be0: e5953000 ldr r3, [r5]
9be4: e3530000 cmp r3, #0
9be8: 0a00000f beq 9c2c <_Scheduler_CBS_Cleanup+0x54>
9bec: e59f6048 ldr r6, [pc, #72] ; 9c3c <_Scheduler_CBS_Cleanup+0x64>
9bf0: e5960000 ldr r0, [r6]
9bf4: e3a04000 mov r4, #0
if ( _Scheduler_CBS_Server_list[ i ] )
9bf8: e7903104 ldr r3, [r0, r4, lsl #2]
9bfc: e3530000 cmp r3, #0
9c00: 0a000002 beq 9c10 <_Scheduler_CBS_Cleanup+0x38>
_Scheduler_CBS_Destroy_server( i );
9c04: e1a00004 mov r0, r4
9c08: eb000043 bl 9d1c <_Scheduler_CBS_Destroy_server>
9c0c: e5960000 ldr r0, [r6]
int _Scheduler_CBS_Cleanup (void)
{
unsigned int i;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
9c10: e5953000 ldr r3, [r5]
9c14: e2844001 add r4, r4, #1
9c18: e1530004 cmp r3, r4
9c1c: 8afffff5 bhi 9bf8 <_Scheduler_CBS_Cleanup+0x20>
if ( _Scheduler_CBS_Server_list[ i ] )
_Scheduler_CBS_Destroy_server( i );
}
_Workspace_Free( _Scheduler_CBS_Server_list );
9c20: eb0007cc bl bb58 <_Workspace_Free>
return SCHEDULER_CBS_OK;
}
9c24: e3a00000 mov r0, #0
9c28: e8bd8070 pop {r4, r5, r6, pc}
int _Scheduler_CBS_Cleanup (void)
{
unsigned int i;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
9c2c: e59f3008 ldr r3, [pc, #8] ; 9c3c <_Scheduler_CBS_Cleanup+0x64> <== NOT EXECUTED
9c30: e5930000 ldr r0, [r3] <== NOT EXECUTED
9c34: eafffff9 b 9c20 <_Scheduler_CBS_Cleanup+0x48> <== NOT EXECUTED
00009c40 <_Scheduler_CBS_Create_server>:
)
{
unsigned int i;
Scheduler_CBS_Server *the_server;
if ( params->budget <= 0 ||
9c40: e5903004 ldr r3, [r0, #4]
9c44: e3530000 cmp r3, #0
int _Scheduler_CBS_Create_server (
Scheduler_CBS_Parameters *params,
Scheduler_CBS_Budget_overrun budget_overrun_callback,
rtems_id *server_id
)
{
9c48: e92d45f0 push {r4, r5, r6, r7, r8, sl, lr}
9c4c: e1a04000 mov r4, r0
9c50: e1a05001 mov r5, r1
9c54: e1a07002 mov r7, r2
unsigned int i;
Scheduler_CBS_Server *the_server;
if ( params->budget <= 0 ||
9c58: da000029 ble 9d04 <_Scheduler_CBS_Create_server+0xc4>
9c5c: e5903000 ldr r3, [r0]
9c60: e3530000 cmp r3, #0
9c64: da000026 ble 9d04 <_Scheduler_CBS_Create_server+0xc4>
params->deadline <= 0 ||
params->budget >= SCHEDULER_EDF_PRIO_MSB ||
params->deadline >= SCHEDULER_EDF_PRIO_MSB )
return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
9c68: e59f30a4 ldr r3, [pc, #164] ; 9d14 <_Scheduler_CBS_Create_server+0xd4>
9c6c: e5930000 ldr r0, [r3]
9c70: e3500000 cmp r0, #0
9c74: 0a00000d beq 9cb0 <_Scheduler_CBS_Create_server+0x70>
if ( !_Scheduler_CBS_Server_list[i] )
9c78: e59f8098 ldr r8, [pc, #152] ; 9d18 <_Scheduler_CBS_Create_server+0xd8>
9c7c: e5986000 ldr r6, [r8]
9c80: e596a000 ldr sl, [r6]
9c84: e35a0000 cmp sl, #0
9c88: 11a02006 movne r2, r6
9c8c: 13a03000 movne r3, #0
9c90: 1a000003 bne 9ca4 <_Scheduler_CBS_Create_server+0x64>
9c94: ea000018 b 9cfc <_Scheduler_CBS_Create_server+0xbc>
9c98: e5b21004 ldr r1, [r2, #4]!
9c9c: e3510000 cmp r1, #0
9ca0: 0a000004 beq 9cb8 <_Scheduler_CBS_Create_server+0x78>
params->deadline <= 0 ||
params->budget >= SCHEDULER_EDF_PRIO_MSB ||
params->deadline >= SCHEDULER_EDF_PRIO_MSB )
return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
9ca4: e2833001 add r3, r3, #1
9ca8: e1530000 cmp r3, r0
9cac: 1afffff9 bne 9c98 <_Scheduler_CBS_Create_server+0x58>
if ( !_Scheduler_CBS_Server_list[i] )
break;
}
if ( i == _Scheduler_CBS_Maximum_servers )
return SCHEDULER_CBS_ERROR_FULL;
9cb0: e3e00019 mvn r0, #25
9cb4: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
params->deadline <= 0 ||
params->budget >= SCHEDULER_EDF_PRIO_MSB ||
params->deadline >= SCHEDULER_EDF_PRIO_MSB )
return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
9cb8: e1a0a103 lsl sl, r3, #2
}
if ( i == _Scheduler_CBS_Maximum_servers )
return SCHEDULER_CBS_ERROR_FULL;
*server_id = i;
9cbc: e5873000 str r3, [r7]
_Scheduler_CBS_Server_list[*server_id] = (Scheduler_CBS_Server *)
_Workspace_Allocate( sizeof(Scheduler_CBS_Server) );
9cc0: e3a00010 mov r0, #16
9cc4: eb00079d bl bb40 <_Workspace_Allocate>
if ( i == _Scheduler_CBS_Maximum_servers )
return SCHEDULER_CBS_ERROR_FULL;
*server_id = i;
_Scheduler_CBS_Server_list[*server_id] = (Scheduler_CBS_Server *)
9cc8: e786000a str r0, [r6, sl]
_Workspace_Allocate( sizeof(Scheduler_CBS_Server) );
the_server = _Scheduler_CBS_Server_list[*server_id];
9ccc: e5972000 ldr r2, [r7]
9cd0: e5983000 ldr r3, [r8]
9cd4: e7933102 ldr r3, [r3, r2, lsl #2]
if ( !the_server )
9cd8: e3530000 cmp r3, #0
9cdc: 0a00000a beq 9d0c <_Scheduler_CBS_Create_server+0xcc>
return SCHEDULER_CBS_ERROR_NO_MEMORY;
the_server->parameters = *params;
9ce0: e8940003 ldm r4, {r0, r1}
the_server->task_id = -1;
9ce4: e3e02000 mvn r2, #0
_Workspace_Allocate( sizeof(Scheduler_CBS_Server) );
the_server = _Scheduler_CBS_Server_list[*server_id];
if ( !the_server )
return SCHEDULER_CBS_ERROR_NO_MEMORY;
the_server->parameters = *params;
9ce8: e9830003 stmib r3, {r0, r1}
the_server->task_id = -1;
9cec: e5832000 str r2, [r3]
the_server->cbs_budget_overrun = budget_overrun_callback;
9cf0: e583500c str r5, [r3, #12]
return SCHEDULER_CBS_OK;
9cf4: e3a00000 mov r0, #0
9cf8: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
params->budget >= SCHEDULER_EDF_PRIO_MSB ||
params->deadline >= SCHEDULER_EDF_PRIO_MSB )
return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;
for ( i = 0; i<_Scheduler_CBS_Maximum_servers; i++ ) {
if ( !_Scheduler_CBS_Server_list[i] )
9cfc: e1a0300a mov r3, sl
9d00: eaffffed b 9cbc <_Scheduler_CBS_Create_server+0x7c>
if ( params->budget <= 0 ||
params->deadline <= 0 ||
params->budget >= SCHEDULER_EDF_PRIO_MSB ||
params->deadline >= SCHEDULER_EDF_PRIO_MSB )
return SCHEDULER_CBS_ERROR_INVALID_PARAMETER;
9d04: e3e00011 mvn r0, #17
9d08: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
*server_id = i;
_Scheduler_CBS_Server_list[*server_id] = (Scheduler_CBS_Server *)
_Workspace_Allocate( sizeof(Scheduler_CBS_Server) );
the_server = _Scheduler_CBS_Server_list[*server_id];
if ( !the_server )
return SCHEDULER_CBS_ERROR_NO_MEMORY;
9d0c: e3e00010 mvn r0, #16 <== NOT EXECUTED
the_server->parameters = *params;
the_server->task_id = -1;
the_server->cbs_budget_overrun = budget_overrun_callback;
return SCHEDULER_CBS_OK;
}
9d10: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc} <== NOT EXECUTED
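_Scheduler_CBS_Create_server is easier to audit as a linear function: validate the parameters, find a free slot in the server table, allocate the server block, then fill it in. The sketch below restates that flow with placeholder types; the error values simply mirror the mvn immediates in the listing (-18, -26, -17), malloc stands in for _Workspace_Allocate, and prio_msb stands in for SCHEDULER_EDF_PRIO_MSB.

#include <stdint.h>
#include <stdlib.h>

enum {
  CBS_OK             =   0,
  CBS_ERR_INVALID    = -18,
  CBS_ERR_NO_MEMORY  = -17,
  CBS_ERR_FULL       = -26
};

struct cbs_params { int32_t deadline; int32_t budget; };

struct cbs_server {
  int32_t           task_id;
  struct cbs_params parameters;
  void            (*budget_overrun)(uint32_t server_id);
};

int cbs_create_server(struct cbs_server **server_list, uint32_t max_servers,
                      const struct cbs_params *params,
                      void (*overrun_cb)(uint32_t), uint32_t *server_id,
                      int32_t prio_msb)
{
  /* Reject non-positive or out-of-range budget/deadline up front. */
  if (params->budget <= 0 || params->deadline <= 0 ||
      params->budget >= prio_msb || params->deadline >= prio_msb)
    return CBS_ERR_INVALID;

  /* Find the first unused slot in the server table. */
  uint32_t i;
  for (i = 0; i < max_servers; ++i)
    if (server_list[i] == NULL)
      break;
  if (i == max_servers)
    return CBS_ERR_FULL;

  *server_id     = i;
  server_list[i] = malloc(sizeof(struct cbs_server));  /* _Workspace_Allocate in RTEMS */
  if (server_list[i] == NULL)
    return CBS_ERR_NO_MEMORY;

  server_list[i]->parameters     = *params;
  server_list[i]->task_id        = -1;      /* not yet attached to a task */
  server_list[i]->budget_overrun = overrun_cb;
  return CBS_OK;
}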
00007274 <_TOD_Validate>:
};
bool _TOD_Validate(
const rtems_time_of_day *the_tod
)
{
7274: e92d4010 push {r4, lr}
uint32_t days_in_month;
uint32_t ticks_per_second;
ticks_per_second = TOD_MICROSECONDS_PER_SECOND /
rtems_configuration_get_microseconds_per_tick();
if ((!the_tod) ||
7278: e2504000 subs r4, r0, #0
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
(the_tod->month == 0) ||
(the_tod->month > TOD_MONTHS_PER_YEAR) ||
(the_tod->year < TOD_BASE_YEAR) ||
(the_tod->day == 0) )
return false;
727c: 01a00004 moveq r0, r4
uint32_t days_in_month;
uint32_t ticks_per_second;
ticks_per_second = TOD_MICROSECONDS_PER_SECOND /
rtems_configuration_get_microseconds_per_tick();
if ((!the_tod) ||
7280: 08bd8010 popeq {r4, pc}
)
{
uint32_t days_in_month;
uint32_t ticks_per_second;
ticks_per_second = TOD_MICROSECONDS_PER_SECOND /
7284: e59f3098 ldr r3, [pc, #152] ; 7324 <_TOD_Validate+0xb0>
7288: e59f0098 ldr r0, [pc, #152] ; 7328 <_TOD_Validate+0xb4>
728c: e593100c ldr r1, [r3, #12]
7290: eb00454b bl 187c4 <__aeabi_uidiv>
rtems_configuration_get_microseconds_per_tick();
if ((!the_tod) ||
7294: e5943018 ldr r3, [r4, #24]
7298: e1500003 cmp r0, r3
729c: 9a00001c bls 7314 <_TOD_Validate+0xa0>
(the_tod->ticks >= ticks_per_second) ||
72a0: e5943014 ldr r3, [r4, #20]
72a4: e353003b cmp r3, #59 ; 0x3b
72a8: 8a000019 bhi 7314 <_TOD_Validate+0xa0>
(the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
72ac: e5943010 ldr r3, [r4, #16]
72b0: e353003b cmp r3, #59 ; 0x3b
72b4: 8a000016 bhi 7314 <_TOD_Validate+0xa0>
(the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
72b8: e594300c ldr r3, [r4, #12]
72bc: e3530017 cmp r3, #23
72c0: 8a000013 bhi 7314 <_TOD_Validate+0xa0>
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
(the_tod->month == 0) ||
72c4: e5940004 ldr r0, [r4, #4]
rtems_configuration_get_microseconds_per_tick();
if ((!the_tod) ||
(the_tod->ticks >= ticks_per_second) ||
(the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
(the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
72c8: e3500000 cmp r0, #0
72cc: 08bd8010 popeq {r4, pc}
(the_tod->month == 0) ||
72d0: e350000c cmp r0, #12
72d4: 8a00000e bhi 7314 <_TOD_Validate+0xa0>
(the_tod->month > TOD_MONTHS_PER_YEAR) ||
(the_tod->year < TOD_BASE_YEAR) ||
72d8: e5943000 ldr r3, [r4]
(the_tod->ticks >= ticks_per_second) ||
(the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
(the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
(the_tod->month == 0) ||
(the_tod->month > TOD_MONTHS_PER_YEAR) ||
72dc: e59f2048 ldr r2, [pc, #72] ; 732c <_TOD_Validate+0xb8>
72e0: e1530002 cmp r3, r2
72e4: 9a00000a bls 7314 <_TOD_Validate+0xa0>
(the_tod->year < TOD_BASE_YEAR) ||
(the_tod->day == 0) )
72e8: e5944008 ldr r4, [r4, #8]
(the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
(the_tod->minute >= TOD_MINUTES_PER_HOUR) ||
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
(the_tod->month == 0) ||
(the_tod->month > TOD_MONTHS_PER_YEAR) ||
(the_tod->year < TOD_BASE_YEAR) ||
72ec: e3540000 cmp r4, #0
72f0: 0a000009 beq 731c <_TOD_Validate+0xa8>
(the_tod->day == 0) )
return false;
if ( (the_tod->year % 4) == 0 )
72f4: e3130003 tst r3, #3
days_in_month = _TOD_Days_per_month[ 1 ][ the_tod->month ];
else
days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
72f8: e59f3030 ldr r3, [pc, #48] ; 7330 <_TOD_Validate+0xbc>
(the_tod->year < TOD_BASE_YEAR) ||
(the_tod->day == 0) )
return false;
if ( (the_tod->year % 4) == 0 )
days_in_month = _TOD_Days_per_month[ 1 ][ the_tod->month ];
72fc: 0280000d addeq r0, r0, #13
else
days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
7300: e7930100 ldr r0, [r3, r0, lsl #2]
const uint32_t _TOD_Days_per_month[ 2 ][ 13 ] = {
{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 },
{ 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }
};
bool _TOD_Validate(
7304: e1500004 cmp r0, r4
7308: 33a00000 movcc r0, #0
730c: 23a00001 movcs r0, #1
7310: e8bd8010 pop {r4, pc}
(the_tod->hour >= TOD_HOURS_PER_DAY) ||
(the_tod->month == 0) ||
(the_tod->month > TOD_MONTHS_PER_YEAR) ||
(the_tod->year < TOD_BASE_YEAR) ||
(the_tod->day == 0) )
return false;
7314: e3a00000 mov r0, #0
7318: e8bd8010 pop {r4, pc}
731c: e1a00004 mov r0, r4 <== NOT EXECUTED
if ( the_tod->day > days_in_month )
return false;
return true;
}
7320: e8bd8010 pop {r4, pc} <== NOT EXECUTED
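The chained comparisons in _TOD_Validate are spread over many conditional branches above. A standalone restatement looks like the sketch below; ticks_per_second and the base year are passed in instead of being read from the configuration table, the field names are illustrative, and it keeps the same simple year % 4 leap-year test the listing shows.

#include <stdbool.h>
#include <stdint.h>

struct tod {
  uint32_t year, month, day, hour, minute, second, ticks;
};

static const uint32_t days_per_month[2][13] = {
  { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 },
  { 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }
};

bool tod_validate(const struct tod *t, uint32_t ticks_per_second,
                  uint32_t base_year)
{
  /* Every field must be inside its range before the day is checked. */
  if (t == NULL ||
      t->ticks  >= ticks_per_second ||
      t->second >= 60 ||
      t->minute >= 60 ||
      t->hour   >= 24 ||
      t->month  == 0 || t->month > 12 ||
      t->year   <  base_year ||
      t->day    == 0)
    return false;

  /* Pick the leap-year row; the listing uses a plain year % 4 test. */
  uint32_t leap = (t->year % 4) == 0 ? 1 : 0;
  return t->day <= days_per_month[leap][t->month];
}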
00009390 <_Thread_queue_Enqueue_priority>:
Priority_Control priority;
States_Control block_state;
_Chain_Initialize_empty( &the_thread->Wait.Block2n );
priority = the_thread->current_priority;
9390: e5913014 ldr r3, [r1, #20]
RTEMS_INLINE_ROUTINE uint32_t _Thread_queue_Header_number (
Priority_Control the_priority
)
{
return (the_priority / TASK_QUEUE_DATA_PRIORITIES_PER_HEADER);
9394: e1a0c323 lsr ip, r3, #6
Thread_blocking_operation_States _Thread_queue_Enqueue_priority (
Thread_queue_Control *the_thread_queue,
Thread_Control *the_thread,
ISR_Level *level_p
)
{
9398: e92d07f0 push {r4, r5, r6, r7, r8, r9, sl}
_Chain_Initialize_empty( &the_thread->Wait.Block2n );
priority = the_thread->current_priority;
header_index = _Thread_queue_Header_number( priority );
header = &the_thread_queue->Queues.Priority[ header_index ];
939c: e08cc08c add ip, ip, ip, lsl #1
RTEMS_INLINE_ROUTINE void _Chain_Initialize_empty(
Chain_Control *the_chain
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
93a0: e281503c add r5, r1, #60 ; 0x3c
block_state = the_thread_queue->state;
if ( _Thread_queue_Is_reverse_search( priority ) )
93a4: e3130020 tst r3, #32
93a8: e2814038 add r4, r1, #56 ; 0x38
head->next = tail;
93ac: e5815038 str r5, [r1, #56] ; 0x38
_Chain_Initialize_empty( &the_thread->Wait.Block2n );
priority = the_thread->current_priority;
header_index = _Thread_queue_Header_number( priority );
header = &the_thread_queue->Queues.Priority[ header_index ];
93b0: e080c10c add ip, r0, ip, lsl #2
head->previous = NULL;
93b4: e3a05000 mov r5, #0
93b8: e581503c str r5, [r1, #60] ; 0x3c
tail->previous = head;
93bc: e5814040 str r4, [r1, #64] ; 0x40
block_state = the_thread_queue->state;
93c0: e5906038 ldr r6, [r0, #56] ; 0x38
93c4: 028c9004 addeq r9, ip, #4
93c8: 159f9164 ldrne r9, [pc, #356] ; 9534 <_Thread_queue_Enqueue_priority+0x1a4>
if ( _Thread_queue_Is_reverse_search( priority ) )
93cc: 1a00001b bne 9440 <_Thread_queue_Enqueue_priority+0xb0>
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
93d0: e10fa000 mrs sl, CPSR
93d4: e38a4080 orr r4, sl, #128 ; 0x80
93d8: e129f004 msr CPSR_fc, r4
93dc: e1a0800a mov r8, sl
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First(
Chain_Control *the_chain
)
{
return _Chain_Head( the_chain )->next;
93e0: e59c4000 ldr r4, [ip]
restart_forward_search:
search_priority = PRIORITY_MINIMUM - 1;
_ISR_Disable( level );
search_thread = (Thread_Control *) _Chain_First( header );
while ( !_Chain_Is_tail( header, (Chain_Node *)search_thread ) ) {
93e4: e1540009 cmp r4, r9
93e8: 1a000009 bne 9414 <_Thread_queue_Enqueue_priority+0x84>
93ec: ea00004e b 952c <_Thread_queue_Enqueue_priority+0x19c>
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
93f0: e10f7000 mrs r7, CPSR
93f4: e129f00a msr CPSR_fc, sl
93f8: e129f007 msr CPSR_fc, r7
RTEMS_INLINE_ROUTINE bool _States_Are_set (
States_Control the_states,
States_Control mask
)
{
return ( (the_states & mask) != STATES_READY);
93fc: e5947010 ldr r7, [r4, #16]
search_priority = search_thread->current_priority;
if ( priority <= search_priority )
break;
#endif
_ISR_Flash( level );
if ( !_States_Are_set( search_thread->current_state, block_state) ) {
9400: e1160007 tst r6, r7
9404: 0a000031 beq 94d0 <_Thread_queue_Enqueue_priority+0x140>
_ISR_Enable( level );
goto restart_forward_search;
}
search_thread =
9408: e5944000 ldr r4, [r4]
restart_forward_search:
search_priority = PRIORITY_MINIMUM - 1;
_ISR_Disable( level );
search_thread = (Thread_Control *) _Chain_First( header );
while ( !_Chain_Is_tail( header, (Chain_Node *)search_thread ) ) {
940c: e1540009 cmp r4, r9
9410: 0a000002 beq 9420 <_Thread_queue_Enqueue_priority+0x90>
search_priority = search_thread->current_priority;
9414: e5945014 ldr r5, [r4, #20]
if ( priority <= search_priority )
9418: e1530005 cmp r3, r5
941c: 8afffff3 bhi 93f0 <_Thread_queue_Enqueue_priority+0x60>
}
search_thread =
(Thread_Control *)search_thread->Object.Node.next;
}
if ( the_thread_queue->sync_state !=
9420: e590c030 ldr ip, [r0, #48] ; 0x30
9424: e35c0001 cmp ip, #1
9428: 0a00002a beq 94d8 <_Thread_queue_Enqueue_priority+0x148>
* For example, the blocking thread could have been given
* the mutex by an ISR or timed out.
*
* WARNING! Returning with interrupts disabled!
*/
*level_p = level;
942c: e5828000 str r8, [r2]
return the_thread_queue->sync_state;
}
9430: e1a0000c mov r0, ip
9434: e8bd07f0 pop {r4, r5, r6, r7, r8, r9, sl}
9438: e12fff1e bx lr
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
943c: e129f00a msr CPSR_fc, sl <== NOT EXECUTED
the_thread->Wait.queue = the_thread_queue;
_ISR_Enable( level );
return THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
restart_reverse_search:
search_priority = PRIORITY_MAXIMUM + 1;
9440: e5d95000 ldrb r5, [r9]
9444: e2855001 add r5, r5, #1
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
9448: e10fa000 mrs sl, CPSR
944c: e38a4080 orr r4, sl, #128 ; 0x80
9450: e129f004 msr CPSR_fc, r4
9454: e1a0800a mov r8, sl
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Last(
Chain_Control *the_chain
)
{
return _Chain_Tail( the_chain )->previous;
9458: e59c4008 ldr r4, [ip, #8]
_ISR_Disable( level );
search_thread = (Thread_Control *) _Chain_Last( header );
while ( !_Chain_Is_head( header, (Chain_Node *)search_thread ) ) {
945c: e154000c cmp r4, ip
9460: 1a000009 bne 948c <_Thread_queue_Enqueue_priority+0xfc>
9464: ea00000b b 9498 <_Thread_queue_Enqueue_priority+0x108>
static inline void arm_interrupt_flash( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
9468: e10f7000 mrs r7, CPSR
946c: e129f00a msr CPSR_fc, sl
9470: e129f007 msr CPSR_fc, r7
9474: e5947010 ldr r7, [r4, #16]
search_priority = search_thread->current_priority;
if ( priority >= search_priority )
break;
#endif
_ISR_Flash( level );
if ( !_States_Are_set( search_thread->current_state, block_state) ) {
9478: e1160007 tst r6, r7
947c: 0affffee beq 943c <_Thread_queue_Enqueue_priority+0xac>
_ISR_Enable( level );
goto restart_reverse_search;
}
search_thread = (Thread_Control *)
9480: e5944004 ldr r4, [r4, #4]
restart_reverse_search:
search_priority = PRIORITY_MAXIMUM + 1;
_ISR_Disable( level );
search_thread = (Thread_Control *) _Chain_Last( header );
while ( !_Chain_Is_head( header, (Chain_Node *)search_thread ) ) {
9484: e154000c cmp r4, ip
9488: 0a000002 beq 9498 <_Thread_queue_Enqueue_priority+0x108>
search_priority = search_thread->current_priority;
948c: e5945014 ldr r5, [r4, #20]
if ( priority >= search_priority )
9490: e1530005 cmp r3, r5
9494: 3afffff3 bcc 9468 <_Thread_queue_Enqueue_priority+0xd8>
}
search_thread = (Thread_Control *)
search_thread->Object.Node.previous;
}
if ( the_thread_queue->sync_state !=
9498: e590c030 ldr ip, [r0, #48] ; 0x30
949c: e35c0001 cmp ip, #1
94a0: 1affffe1 bne 942c <_Thread_queue_Enqueue_priority+0x9c>
THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
goto synchronize;
the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
if ( priority == search_priority )
94a4: e1530005 cmp r3, r5
if ( the_thread_queue->sync_state !=
THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
goto synchronize;
the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
94a8: e3a03000 mov r3, #0
94ac: e5803030 str r3, [r0, #48] ; 0x30
if ( priority == search_priority )
94b0: 0a000014 beq 9508 <_Thread_queue_Enqueue_priority+0x178>
goto equal_priority;
search_node = (Chain_Node *) search_thread;
next_node = search_node->next;
94b4: e5943000 ldr r3, [r4]
the_node = (Chain_Node *) the_thread;
the_node->next = next_node;
94b8: e8810018 stm r1, {r3, r4}
the_node->previous = search_node;
search_node->next = the_node;
next_node->previous = the_node;
94bc: e5831004 str r1, [r3, #4]
next_node = search_node->next;
the_node = (Chain_Node *) the_thread;
the_node->next = next_node;
the_node->previous = search_node;
search_node->next = the_node;
94c0: e5841000 str r1, [r4]
next_node->previous = the_node;
the_thread->Wait.queue = the_thread_queue;
94c4: e5810044 str r0, [r1, #68] ; 0x44
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
94c8: e129f00a msr CPSR_fc, sl
_ISR_Enable( level );
return THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
94cc: eaffffd7 b 9430 <_Thread_queue_Enqueue_priority+0xa0>
94d0: e129f00a msr CPSR_fc, sl
94d4: eaffffbd b 93d0 <_Thread_queue_Enqueue_priority+0x40>
THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
goto synchronize;
the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
if ( priority == search_priority )
94d8: e1530005 cmp r3, r5
if ( the_thread_queue->sync_state !=
THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED )
goto synchronize;
the_thread_queue->sync_state = THREAD_BLOCKING_OPERATION_SYNCHRONIZED;
94dc: e3a03000 mov r3, #0
94e0: e5803030 str r3, [r0, #48] ; 0x30
if ( priority == search_priority )
94e4: 0a000007 beq 9508 <_Thread_queue_Enqueue_priority+0x178>
goto equal_priority;
search_node = (Chain_Node *) search_thread;
previous_node = search_node->previous;
94e8: e5943004 ldr r3, [r4, #4]
the_node = (Chain_Node *) the_thread;
the_node->next = search_node;
94ec: e5814000 str r4, [r1]
the_node->previous = previous_node;
94f0: e5813004 str r3, [r1, #4]
previous_node->next = the_node;
94f4: e5831000 str r1, [r3]
search_node->previous = the_node;
94f8: e5841004 str r1, [r4, #4]
the_thread->Wait.queue = the_thread_queue;
94fc: e5810044 str r0, [r1, #68] ; 0x44
9500: e129f00a msr CPSR_fc, sl
_ISR_Enable( level );
return THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
9504: eaffffc9 b 9430 <_Thread_queue_Enqueue_priority+0xa0>
_ISR_Enable( level );
return THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
equal_priority: /* add at end of priority group */
search_node = _Chain_Tail( &search_thread->Wait.Block2n );
previous_node = search_node->previous;
9508: e5943040 ldr r3, [r4, #64] ; 0x40
the_thread->Wait.queue = the_thread_queue;
_ISR_Enable( level );
return THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
equal_priority: /* add at end of priority group */
search_node = _Chain_Tail( &search_thread->Wait.Block2n );
950c: e284203c add r2, r4, #60 ; 0x3c
previous_node = search_node->previous;
the_node = (Chain_Node *) the_thread;
the_node->next = search_node;
the_node->previous = previous_node;
9510: e881000c stm r1, {r2, r3}
previous_node->next = the_node;
9514: e5831000 str r1, [r3]
search_node->previous = the_node;
9518: e5841040 str r1, [r4, #64] ; 0x40
the_thread->Wait.queue = the_thread_queue;
951c: e5810044 str r0, [r1, #68] ; 0x44
9520: e129f008 msr CPSR_fc, r8
_ISR_Enable( level );
return THREAD_BLOCKING_OPERATION_NOTHING_HAPPENED;
9524: e3a0c001 mov ip, #1
9528: eaffffc0 b 9430 <_Thread_queue_Enqueue_priority+0xa0>
if ( _Thread_queue_Is_reverse_search( priority ) )
goto restart_reverse_search;
restart_forward_search:
search_priority = PRIORITY_MINIMUM - 1;
952c: e3e05000 mvn r5, #0
9530: eaffffba b 9420 <_Thread_queue_Enqueue_priority+0x90>
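_Thread_queue_Enqueue_priority keeps one FIFO-within-priority chain per header of 64 priorities and walks it from whichever end is nearer to the target priority, which is why the listing branches into separate forward and reverse searches. The reduced sketch below covers only the header choice and the forward search, with illustrative structures and none of the ISR flashing, sync-state handling or Block2n sub-chains of the real code.

#include <stdbool.h>
#include <stdint.h>

#define PRIORITIES_PER_HEADER 64u

struct tq_node   { struct tq_node *next, *prev; uint32_t priority; };
struct tq_header { struct tq_node sentinel; };  /* circular list, dummy node */

/* Each header covers 64 consecutive priorities. */
static uint32_t header_number(uint32_t priority)
{
  return priority / PRIORITIES_PER_HEADER;
}

/* The listing tests bit 5 (priority & 0x20): priorities in the upper half
   of their header are located faster by searching from the tail.         */
static bool search_from_tail(uint32_t priority)
{
  return (priority & (PRIORITIES_PER_HEADER / 2u)) != 0;
}

/* Forward search: stop at the first queued thread whose priority is not
   lower, and insert the new thread in front of it.  Equal priorities are
   handled separately in the real code (appended to that thread's Block2n
   chain so FIFO order is kept within a priority).                        */
static struct tq_node *forward_insert_point(struct tq_header *h,
                                            uint32_t priority)
{
  struct tq_node *n = h->sentinel.next;
  while (n != &h->sentinel && n->priority < priority)
    n = n->next;
  return n;
}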
00016d24 <_Timer_server_Body>:
* @a arg points to the corresponding timer server control block.
*/
static rtems_task _Timer_server_Body(
rtems_task_argument arg
)
{
16d24: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
16d28: e24dd024 sub sp, sp, #36 ; 0x24
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
16d2c: e28d6018 add r6, sp, #24
16d30: e28d1010 add r1, sp, #16
16d34: e2862004 add r2, r6, #4
16d38: e58d100c str r1, [sp, #12]
16d3c: e58d2018 str r2, [sp, #24]
head->previous = NULL;
tail->previous = head;
16d40: e28d100c add r1, sp, #12
16d44: e28d201c add r2, sp, #28
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
head->previous = NULL;
16d48: e3a03000 mov r3, #0
tail->previous = head;
16d4c: e58d1014 str r1, [sp, #20]
16d50: e58d2000 str r2, [sp]
16d54: e2801008 add r1, r0, #8
16d58: e2802040 add r2, r0, #64 ; 0x40
16d5c: e2807068 add r7, r0, #104 ; 0x68
16d60: e2805030 add r5, r0, #48 ; 0x30
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
head->previous = NULL;
16d64: e58d3010 str r3, [sp, #16]
16d68: e58d301c str r3, [sp, #28]
tail->previous = head;
16d6c: e58d6020 str r6, [sp, #32]
16d70: e59f81b8 ldr r8, [pc, #440] ; 16f30 <_Timer_server_Body+0x20c>
16d74: e59f91b8 ldr r9, [pc, #440] ; 16f34 <_Timer_server_Body+0x210>
16d78: e58d1004 str r1, [sp, #4]
16d7c: e58d2008 str r2, [sp, #8]
* service routine may remove a watchdog from the chain.
*/
_ISR_Disable( level );
watchdog = (Watchdog_Control *) _Chain_Get_unprotected( &fire_chain );
if ( watchdog != NULL ) {
watchdog->state = WATCHDOG_INACTIVE;
16d80: e1a0a007 mov sl, r7
* @a arg points to the corresponding timer server control block.
*/
static rtems_task _Timer_server_Body(
rtems_task_argument arg
)
{
16d84: e1a04000 mov r4, r0
16d88: e28db010 add fp, sp, #16
* service routine may remove a watchdog from the chain.
*/
_ISR_Disable( level );
watchdog = (Watchdog_Control *) _Chain_Get_unprotected( &fire_chain );
if ( watchdog != NULL ) {
watchdog->state = WATCHDOG_INACTIVE;
16d8c: e1a07005 mov r7, r5
{
/*
* Afterwards all timer inserts are directed to this chain and the interval
* and TOD chains will be no more modified by other parties.
*/
ts->insert_chain = insert_chain;
16d90: e28d300c add r3, sp, #12
16d94: e5843078 str r3, [r4, #120] ; 0x78
static void _Timer_server_Process_interval_watchdogs(
Timer_server_Watchdogs *watchdogs,
Chain_Control *fire_chain
)
{
Watchdog_Interval snapshot = _Watchdog_Ticks_since_boot;
16d98: e5983000 ldr r3, [r8]
/*
* We assume adequate unsigned arithmetic here.
*/
Watchdog_Interval delta = snapshot - watchdogs->last_snapshot;
16d9c: e594103c ldr r1, [r4, #60] ; 0x3c
watchdogs->last_snapshot = snapshot;
_Watchdog_Adjust_to_chain( &watchdogs->Chain, delta, fire_chain );
16da0: e1a00007 mov r0, r7
16da4: e0611003 rsb r1, r1, r3
16da8: e1a02006 mov r2, r6
/*
* We assume adequate unsigned arithmetic here.
*/
Watchdog_Interval delta = snapshot - watchdogs->last_snapshot;
watchdogs->last_snapshot = snapshot;
16dac: e584303c str r3, [r4, #60] ; 0x3c
_Watchdog_Adjust_to_chain( &watchdogs->Chain, delta, fire_chain );
16db0: eb00113f bl 1b2b4 <_Watchdog_Adjust_to_chain>
16db4: e59f217c ldr r2, [pc, #380] ; 16f38 <_Timer_server_Body+0x214>
16db8: e8990003 ldm r9, {r0, r1}
16dbc: e3a03000 mov r3, #0
16dc0: eb004d32 bl 2a290 <__divdi3>
Timer_server_Watchdogs *watchdogs,
Chain_Control *fire_chain
)
{
Watchdog_Interval snapshot = (Watchdog_Interval) _TOD_Seconds_since_epoch();
Watchdog_Interval last_snapshot = watchdogs->last_snapshot;
16dc4: e5942074 ldr r2, [r4, #116] ; 0x74
/*
* Process the seconds chain. Start by checking that the Time
* of Day (TOD) has not been set backwards. If it has then
* we want to adjust the watchdogs->Chain to indicate this.
*/
if ( snapshot > last_snapshot ) {
16dc8: e1500002 cmp r0, r2
16dcc: e1a05000 mov r5, r0
16dd0: 8a000022 bhi 16e60 <_Timer_server_Body+0x13c>
* TOD has been set forward.
*/
delta = snapshot - last_snapshot;
_Watchdog_Adjust_to_chain( &watchdogs->Chain, delta, fire_chain );
} else if ( snapshot < last_snapshot ) {
16dd4: 3a000018 bcc 16e3c <_Timer_server_Body+0x118>
*/
delta = last_snapshot - snapshot;
_Watchdog_Adjust( &watchdogs->Chain, WATCHDOG_BACKWARD, delta );
}
watchdogs->last_snapshot = snapshot;
16dd8: e5845074 str r5, [r4, #116] ; 0x74
}
static void _Timer_server_Process_insertions( Timer_server_Control *ts )
{
while ( true ) {
Timer_Control *timer = (Timer_Control *) _Chain_Get( ts->insert_chain );
16ddc: e5940078 ldr r0, [r4, #120] ; 0x78
16de0: eb0002d0 bl 17928 <_Chain_Get>
if ( timer == NULL ) {
16de4: e2501000 subs r1, r0, #0
16de8: 0a00000b beq 16e1c <_Timer_server_Body+0xf8>
static void _Timer_server_Insert_timer(
Timer_server_Control *ts,
Timer_Control *timer
)
{
if ( timer->the_class == TIMER_INTERVAL_ON_TASK ) {
16dec: e5913038 ldr r3, [r1, #56] ; 0x38
16df0: e3530001 cmp r3, #1
16df4: 0a000015 beq 16e50 <_Timer_server_Body+0x12c>
_Watchdog_Insert( &ts->Interval_watchdogs.Chain, &timer->Ticker );
} else if ( timer->the_class == TIMER_TIME_OF_DAY_ON_TASK ) {
16df8: e3530003 cmp r3, #3
16dfc: 1afffff6 bne 16ddc <_Timer_server_Body+0xb8>
_Watchdog_Insert( &ts->TOD_watchdogs.Chain, &timer->Ticker );
16e00: e2811010 add r1, r1, #16
16e04: e1a0000a mov r0, sl
16e08: eb001152 bl 1b358 <_Watchdog_Insert>
}
static void _Timer_server_Process_insertions( Timer_server_Control *ts )
{
while ( true ) {
Timer_Control *timer = (Timer_Control *) _Chain_Get( ts->insert_chain );
16e0c: e5940078 ldr r0, [r4, #120] ; 0x78
16e10: eb0002c4 bl 17928 <_Chain_Get>
if ( timer == NULL ) {
16e14: e2501000 subs r1, r0, #0
16e18: 1afffff3 bne 16dec <_Timer_server_Body+0xc8>
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
16e1c: e10f2000 mrs r2, CPSR
16e20: e3823080 orr r3, r2, #128 ; 0x80
16e24: e129f003 msr CPSR_fc, r3
* body loop.
*/
_Timer_server_Process_insertions( ts );
_ISR_Disable( level );
if ( _Chain_Is_empty( insert_chain ) ) {
16e28: e59d300c ldr r3, [sp, #12]
16e2c: e153000b cmp r3, fp
16e30: 0a00000f beq 16e74 <_Timer_server_Body+0x150>
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
16e34: e129f002 msr CPSR_fc, r2 <== NOT EXECUTED
16e38: eaffffd6 b 16d98 <_Timer_server_Body+0x74> <== NOT EXECUTED
/*
 * The current TOD is before the last TOD, which indicates that
 * the TOD has been set backwards.
*/
delta = last_snapshot - snapshot;
_Watchdog_Adjust( &watchdogs->Chain, WATCHDOG_BACKWARD, delta );
16e3c: e1a0000a mov r0, sl
16e40: e3a01001 mov r1, #1
16e44: e0652002 rsb r2, r5, r2
16e48: eb0010eb bl 1b1fc <_Watchdog_Adjust>
16e4c: eaffffe1 b 16dd8 <_Timer_server_Body+0xb4>
Timer_server_Control *ts,
Timer_Control *timer
)
{
if ( timer->the_class == TIMER_INTERVAL_ON_TASK ) {
_Watchdog_Insert( &ts->Interval_watchdogs.Chain, &timer->Ticker );
16e50: e1a00007 mov r0, r7
16e54: e2811010 add r1, r1, #16
16e58: eb00113e bl 1b358 <_Watchdog_Insert>
16e5c: eaffffde b 16ddc <_Timer_server_Body+0xb8>
/*
* This path is for normal forward movement and cases where the
* TOD has been set forward.
*/
delta = snapshot - last_snapshot;
_Watchdog_Adjust_to_chain( &watchdogs->Chain, delta, fire_chain );
16e60: e0621005 rsb r1, r2, r5
16e64: e1a0000a mov r0, sl
16e68: e1a02006 mov r2, r6
16e6c: eb001110 bl 1b2b4 <_Watchdog_Adjust_to_chain>
16e70: eaffffd8 b 16dd8 <_Timer_server_Body+0xb4>
*/
_Timer_server_Process_insertions( ts );
_ISR_Disable( level );
if ( _Chain_Is_empty( insert_chain ) ) {
ts->insert_chain = NULL;
16e74: e5841078 str r1, [r4, #120] ; 0x78
16e78: e129f002 msr CPSR_fc, r2
_Chain_Initialize_empty( &fire_chain );
while ( true ) {
_Timer_server_Get_watchdogs_that_fire_now( ts, &insert_chain, &fire_chain );
if ( !_Chain_Is_empty( &fire_chain ) ) {
16e7c: e59d3018 ldr r3, [sp, #24]
16e80: e59d1000 ldr r1, [sp]
16e84: e1530001 cmp r3, r1
16e88: 1a00000a bne 16eb8 <_Timer_server_Body+0x194>
16e8c: ea000012 b 16edc <_Timer_server_Body+0x1b8>
Chain_Control *the_chain
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *old_first = head->next;
Chain_Node *new_first = old_first->next;
16e90: e5932000 ldr r2, [r3]
head->next = new_first;
new_first->previous = head;
16e94: e5826004 str r6, [r2, #4]
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *old_first = head->next;
Chain_Node *new_first = old_first->next;
head->next = new_first;
16e98: e58d2018 str r2, [sp, #24]
* service routine may remove a watchdog from the chain.
*/
_ISR_Disable( level );
watchdog = (Watchdog_Control *) _Chain_Get_unprotected( &fire_chain );
if ( watchdog != NULL ) {
watchdog->state = WATCHDOG_INACTIVE;
16e9c: e3a02000 mov r2, #0
16ea0: e5832008 str r2, [r3, #8]
16ea4: e129f001 msr CPSR_fc, r1
/*
* The timer server may block here and wait for resources or time.
* The system watchdogs are inactive and will remain inactive since
* the active flag of the timer server is true.
*/
(*watchdog->routine)( watchdog->id, watchdog->user_data );
16ea8: e2830020 add r0, r3, #32
16eac: e8900003 ldm r0, {r0, r1}
16eb0: e1a0e00f mov lr, pc
16eb4: e593f01c ldr pc, [r3, #28]
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
16eb8: e10f1000 mrs r1, CPSR
16ebc: e3813080 orr r3, r1, #128 ; 0x80
16ec0: e129f003 msr CPSR_fc, r3
*/
RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
const Chain_Control *the_chain
)
{
return _Chain_Immutable_head( the_chain )->next;
16ec4: e59d3018 ldr r3, [sp, #24]
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected(
Chain_Control *the_chain
)
{
if ( !_Chain_Is_empty(the_chain))
16ec8: e59d2000 ldr r2, [sp]
16ecc: e1530002 cmp r3, r2
16ed0: 1affffee bne 16e90 <_Timer_server_Body+0x16c>
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
16ed4: e129f001 msr CPSR_fc, r1
16ed8: eaffffac b 16d90 <_Timer_server_Body+0x6c>
}
} else {
ts->active = false;
16edc: e3a03000 mov r3, #0
16ee0: e5c4307c strb r3, [r4, #124] ; 0x7c
*
 * This routine increments the thread dispatch level
*/
RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_increment_disable_level(void)
{
uint32_t level = _Thread_Dispatch_disable_level;
16ee4: e59f1050 ldr r1, [pc, #80] ; 16f3c <_Timer_server_Body+0x218>
16ee8: e5913000 ldr r3, [r1]
++level;
16eec: e2833001 add r3, r3, #1
_Thread_Dispatch_disable_level = level;
16ef0: e5813000 str r3, [r1]
/*
* Block until there is something to do.
*/
_Thread_Disable_dispatch();
_Thread_Set_state( ts->thread, STATES_DELAYING );
16ef4: e3a01008 mov r1, #8
16ef8: e5940000 ldr r0, [r4]
16efc: eb000fdc bl 1ae74 <_Thread_Set_state>
_Timer_server_Reset_interval_system_watchdog( ts );
16f00: e1a00004 mov r0, r4
16f04: ebffff5a bl 16c74 <_Timer_server_Reset_interval_system_watchdog>
_Timer_server_Reset_tod_system_watchdog( ts );
16f08: e1a00004 mov r0, r4
16f0c: ebffff6e bl 16ccc <_Timer_server_Reset_tod_system_watchdog>
_Thread_Enable_dispatch();
16f10: eb000d9b bl 1a584 <_Thread_Enable_dispatch>
ts->active = true;
16f14: e3a03001 mov r3, #1
16f18: e5c4307c strb r3, [r4, #124] ; 0x7c
static void _Timer_server_Stop_interval_system_watchdog(
Timer_server_Control *ts
)
{
_Watchdog_Remove( &ts->Interval_watchdogs.System_watchdog );
16f1c: e59d0004 ldr r0, [sp, #4]
16f20: eb001179 bl 1b50c <_Watchdog_Remove>
static void _Timer_server_Stop_tod_system_watchdog(
Timer_server_Control *ts
)
{
_Watchdog_Remove( &ts->TOD_watchdogs.System_watchdog );
16f24: e59d0008 ldr r0, [sp, #8]
16f28: eb001177 bl 1b50c <_Watchdog_Remove>
16f2c: eaffff97 b 16d90 <_Timer_server_Body+0x6c>
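The timer-server body above computes its tick delta with modulo-2^32 unsigned
arithmetic ("We assume adequate unsigned arithmetic here"), so the subtraction
stays correct even when _Watchdog_Ticks_since_boot wraps. A minimal standalone
sketch with illustrative names (not the RTEMS sources):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t Watchdog_Interval;   /* same width as the tick counter */

    static Watchdog_Interval elapsed(Watchdog_Interval last, Watchdog_Interval now)
    {
      /* Modulo-2^32 subtraction: correct even if 'now' has wrapped past zero. */
      return now - last;
    }

    int main(void)
    {
      Watchdog_Interval last = 0xFFFFFFF0u;   /* snapshot just before the wrap */
      Watchdog_Interval now  = 0x00000010u;   /* snapshot after the counter wrapped */

      printf("delta = %u ticks\n", (unsigned) elapsed(last, now));   /* prints 32 */
      return 0;
    }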
000098e4 <_User_extensions_Thread_exitted_visitor>:
Thread_Control *executing,
void *arg,
const User_extensions_Table *callouts
)
{
User_extensions_thread_exitted_extension callout = callouts->thread_exitted;
98e4: e5923018 ldr r3, [r2, #24]
if ( callout != NULL ) {
98e8: e3530000 cmp r3, #0
void _User_extensions_Thread_exitted_visitor(
Thread_Control *executing,
void *arg,
const User_extensions_Table *callouts
)
{
98ec: e52de004 push {lr} ; (str lr, [sp, #-4]!)
User_extensions_thread_exitted_extension callout = callouts->thread_exitted;
if ( callout != NULL ) {
98f0: 049df004 popeq {pc} ; (ldreq pc, [sp], #4)
(*callout)( executing );
98f4: e1a0e00f mov lr, pc
98f8: e12fff13 bx r3
98fc: e49df004 pop {pc} ; (ldr pc, [sp], #4) <== NOT EXECUTED
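_User_extensions_Thread_exitted_visitor above only calls the extension when the
table entry is non-NULL. A standalone sketch of that optional-callout pattern,
with simplified stand-in types:

    #include <stdio.h>
    #include <stddef.h>

    typedef void (*thread_exitted_extension)(void *executing);

    struct extensions_table {
      thread_exitted_extension thread_exitted;   /* may be NULL */
    };

    static void report_exit(void *executing)
    {
      printf("exit callout for thread %p\n", executing);
    }

    static void visit(void *executing, const struct extensions_table *callouts)
    {
      thread_exitted_extension callout = callouts->thread_exitted;

      if (callout != NULL)
        (*callout)(executing);                   /* mirrors the guarded bx r3 above */
    }

    int main(void)
    {
      struct extensions_table with    = { report_exit };
      struct extensions_table without = { NULL };
      int dummy;

      visit(&dummy, &with);      /* invokes the callout */
      visit(&dummy, &without);   /* silently skipped    */
      return 0;
    }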
0000b250 <_Watchdog_Adjust>:
void _Watchdog_Adjust(
Chain_Control *header,
Watchdog_Adjust_directions direction,
Watchdog_Interval units
)
{
b250: e92d41f0 push {r4, r5, r6, r7, r8, lr}
b254: e1a04000 mov r4, r0
b258: e1a05002 mov r5, r2
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
b25c: e10f2000 mrs r2, CPSR
b260: e3823080 orr r3, r2, #128 ; 0x80
b264: e129f003 msr CPSR_fc, r3
*/
RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
const Chain_Control *the_chain
)
{
return _Chain_Immutable_head( the_chain )->next;
b268: e1a07000 mov r7, r0
b26c: e4973004 ldr r3, [r7], #4
* hence the compiler must not assume *header to remain
* unmodified across that call.
*
* Till Straumann, 7/2003
*/
if ( !_Chain_Is_empty( header ) ) {
b270: e1530007 cmp r3, r7
b274: 0a000017 beq b2d8 <_Watchdog_Adjust+0x88>
switch ( direction ) {
b278: e3510000 cmp r1, #0
b27c: 1a000017 bne b2e0 <_Watchdog_Adjust+0x90>
case WATCHDOG_BACKWARD:
_Watchdog_First( header )->delta_interval += units;
break;
case WATCHDOG_FORWARD:
while ( units ) {
b280: e3550000 cmp r5, #0
b284: 0a000013 beq b2d8 <_Watchdog_Adjust+0x88>
if ( units < _Watchdog_First( header )->delta_interval ) {
b288: e5936010 ldr r6, [r3, #16]
b28c: e1550006 cmp r5, r6
_Watchdog_First( header )->delta_interval -= units;
break;
} else {
units -= _Watchdog_First( header )->delta_interval;
_Watchdog_First( header )->delta_interval = 1;
b290: 23a08001 movcs r8, #1
case WATCHDOG_BACKWARD:
_Watchdog_First( header )->delta_interval += units;
break;
case WATCHDOG_FORWARD:
while ( units ) {
if ( units < _Watchdog_First( header )->delta_interval ) {
b294: 2a000005 bcs b2b0 <_Watchdog_Adjust+0x60>
b298: ea000017 b b2fc <_Watchdog_Adjust+0xac> <== NOT EXECUTED
switch ( direction ) {
case WATCHDOG_BACKWARD:
_Watchdog_First( header )->delta_interval += units;
break;
case WATCHDOG_FORWARD:
while ( units ) {
b29c: e0555006 subs r5, r5, r6
b2a0: 0a00000c beq b2d8 <_Watchdog_Adjust+0x88>
if ( units < _Watchdog_First( header )->delta_interval ) {
b2a4: e5936010 ldr r6, [r3, #16]
b2a8: e1560005 cmp r6, r5
b2ac: 8a000012 bhi b2fc <_Watchdog_Adjust+0xac>
_Watchdog_First( header )->delta_interval -= units;
break;
} else {
units -= _Watchdog_First( header )->delta_interval;
_Watchdog_First( header )->delta_interval = 1;
b2b0: e5838010 str r8, [r3, #16]
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
b2b4: e129f002 msr CPSR_fc, r2
_ISR_Enable( level );
_Watchdog_Tickle( header );
b2b8: e1a00004 mov r0, r4
b2bc: eb0000a9 bl b568 <_Watchdog_Tickle>
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
b2c0: e10f2000 mrs r2, CPSR
b2c4: e3823080 orr r3, r2, #128 ; 0x80
b2c8: e129f003 msr CPSR_fc, r3
b2cc: e5943000 ldr r3, [r4]
_ISR_Disable( level );
if ( _Chain_Is_empty( header ) )
b2d0: e1570003 cmp r7, r3
b2d4: 1afffff0 bne b29c <_Watchdog_Adjust+0x4c>
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
b2d8: e129f002 msr CPSR_fc, r2
}
}
_ISR_Enable( level );
}
b2dc: e8bd81f0 pop {r4, r5, r6, r7, r8, pc}
* unmodified across that call.
*
* Till Straumann, 7/2003
*/
if ( !_Chain_Is_empty( header ) ) {
switch ( direction ) {
b2e0: e3510001 cmp r1, #1
b2e4: 1afffffb bne b2d8 <_Watchdog_Adjust+0x88>
case WATCHDOG_BACKWARD:
_Watchdog_First( header )->delta_interval += units;
b2e8: e5931010 ldr r1, [r3, #16]
b2ec: e0815005 add r5, r1, r5
b2f0: e5835010 str r5, [r3, #16]
b2f4: e129f002 msr CPSR_fc, r2
}
}
_ISR_Enable( level );
}
b2f8: e8bd81f0 pop {r4, r5, r6, r7, r8, pc}
_Watchdog_First( header )->delta_interval += units;
break;
case WATCHDOG_FORWARD:
while ( units ) {
if ( units < _Watchdog_First( header )->delta_interval ) {
_Watchdog_First( header )->delta_interval -= units;
b2fc: e0655006 rsb r5, r5, r6
b300: e5835010 str r5, [r3, #16]
break;
b304: eafffff3 b b2d8 <_Watchdog_Adjust+0x88>
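_Watchdog_Adjust above walks a delta-interval chain: each watchdog stores its
timeout relative to its predecessor, so a forward adjustment only consumes head
entries until the remaining units fit into the first delta. A simplified
standalone sketch of that idea (an array instead of a chain, and without the
tickle step):

    #include <stdio.h>

    #define N 3

    static unsigned delta[N] = { 5, 3, 7 };   /* fires at absolute ticks 5, 8, 15 */

    static void adjust_forward(unsigned units)
    {
      unsigned i = 0;

      while (units > 0 && i < N) {
        if (units < delta[i]) {
          delta[i] -= units;                  /* head entry absorbs the remainder */
          units = 0;
        } else {
          units -= delta[i];
          delta[i] = 0;                       /* this entry is now due to fire */
          ++i;                                /* the real code tickles it here */
        }
      }
    }

    int main(void)
    {
      adjust_forward(6);                      /* past the first entry, into the second */
      printf("%u %u %u\n", delta[0], delta[1], delta[2]);   /* prints: 0 2 7 */
      return 0;
    }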
00009d38 <_Workspace_Handler_initialization>:
void _Workspace_Handler_initialization(
Heap_Area *areas,
size_t area_count,
Heap_Initialization_or_extend_handler extend
)
{
9d38: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
uintptr_t remaining = rtems_configuration_get_work_space_size();
9d3c: e59f3120 ldr r3, [pc, #288] ; 9e64 <_Workspace_Handler_initialization+0x12c>
9d40: e5d3c032 ldrb ip, [r3, #50] ; 0x32
void _Workspace_Handler_initialization(
Heap_Area *areas,
size_t area_count,
Heap_Initialization_or_extend_handler extend
)
{
9d44: e24dd004 sub sp, sp, #4
Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
uintptr_t remaining = rtems_configuration_get_work_space_size();
9d48: e35c0000 cmp ip, #0
9d4c: 05937004 ldreq r7, [r3, #4]
void _Workspace_Handler_initialization(
Heap_Area *areas,
size_t area_count,
Heap_Initialization_or_extend_handler extend
)
{
9d50: e58d2000 str r2, [sp]
Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
uintptr_t remaining = rtems_configuration_get_work_space_size();
9d54: e5932000 ldr r2, [r3]
9d58: 13a07000 movne r7, #0
bool unified = rtems_configuration_get_unified_work_area();
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9d5c: e3510000 cmp r1, #0
void _Workspace_Handler_initialization(
Heap_Area *areas,
size_t area_count,
Heap_Initialization_or_extend_handler extend
)
{
9d60: e1a08001 mov r8, r1
Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
uintptr_t remaining = rtems_configuration_get_work_space_size();
9d64: e0877002 add r7, r7, r2
bool do_zero = rtems_configuration_get_do_zero_of_workspace();
9d68: e5d3a030 ldrb sl, [r3, #48] ; 0x30
bool unified = rtems_configuration_get_unified_work_area();
9d6c: e5d3b031 ldrb fp, [r3, #49] ; 0x31
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9d70: 0a000033 beq 9e44 <_Workspace_Handler_initialization+0x10c>
Heap_Area *areas,
size_t area_count,
Heap_Initialization_or_extend_handler extend
)
{
Heap_Initialization_or_extend_handler init_or_extend = _Heap_Initialize;
9d74: e59f90ec ldr r9, [pc, #236] ; 9e68 <_Workspace_Handler_initialization+0x130>
bool unified = rtems_configuration_get_unified_work_area();
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9d78: e1a04000 mov r4, r0
9d7c: e3a06000 mov r6, #0
9d80: ea00001d b 9dfc <_Workspace_Handler_initialization+0xc4>
if ( do_zero ) {
memset( area->begin, 0, area->size );
}
if ( area->size > overhead ) {
9d84: e5945004 ldr r5, [r4, #4]
9d88: e3550016 cmp r5, #22
9d8c: 9a000016 bls 9dec <_Workspace_Handler_initialization+0xb4>
uintptr_t space_available;
uintptr_t size;
if ( unified ) {
9d90: e35b0000 cmp fp, #0
9d94: 1a000004 bne 9dac <_Workspace_Handler_initialization+0x74>
size = area->size;
} else {
if ( remaining > 0 ) {
9d98: e3570000 cmp r7, #0
9d9c: 0a00001d beq 9e18 <_Workspace_Handler_initialization+0xe0>
size = remaining < area->size - overhead ?
9da0: e2453016 sub r3, r5, #22
remaining + overhead : area->size;
9da4: e1530007 cmp r3, r7
9da8: 82875016 addhi r5, r7, #22
} else {
size = 0;
}
}
space_available = (*init_or_extend)(
9dac: e1a02005 mov r2, r5
9db0: e3a03008 mov r3, #8
9db4: e59f00b0 ldr r0, [pc, #176] ; 9e6c <_Workspace_Handler_initialization+0x134>
9db8: e5941000 ldr r1, [r4]
9dbc: e1a0e00f mov lr, pc
9dc0: e12fff19 bx r9
area->begin,
size,
page_size
);
area->begin = (char *) area->begin + size;
9dc4: e5943000 ldr r3, [r4]
area->size -= size;
9dc8: e5942004 ldr r2, [r4, #4]
area->begin,
size,
page_size
);
area->begin = (char *) area->begin + size;
9dcc: e0833005 add r3, r3, r5
area->size -= size;
9dd0: e0655002 rsb r5, r5, r2
if ( space_available < remaining ) {
9dd4: e1500007 cmp r0, r7
size,
page_size
);
area->begin = (char *) area->begin + size;
area->size -= size;
9dd8: e8840028 stm r4, {r3, r5}
remaining -= space_available;
} else {
remaining = 0;
}
init_or_extend = extend;
9ddc: 359d9000 ldrcc r9, [sp]
9de0: 259d9000 ldrcs r9, [sp]
area->begin = (char *) area->begin + size;
area->size -= size;
if ( space_available < remaining ) {
remaining -= space_available;
9de4: 30607007 rsbcc r7, r0, r7
} else {
remaining = 0;
9de8: 23a07000 movcs r7, #0
bool unified = rtems_configuration_get_unified_work_area();
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9dec: e2866001 add r6, r6, #1
9df0: e1560008 cmp r6, r8
9df4: e2844008 add r4, r4, #8
9df8: 0a000011 beq 9e44 <_Workspace_Handler_initialization+0x10c>
Heap_Area *area = &areas [i];
if ( do_zero ) {
9dfc: e35a0000 cmp sl, #0
9e00: 0affffdf beq 9d84 <_Workspace_Handler_initialization+0x4c>
memset( area->begin, 0, area->size );
9e04: e5940000 ldr r0, [r4]
9e08: e3a01000 mov r1, #0
9e0c: e5942004 ldr r2, [r4, #4]
9e10: eb0010e4 bl e1a8 <memset>
9e14: eaffffda b 9d84 <_Workspace_Handler_initialization+0x4c>
} else {
size = 0;
}
}
space_available = (*init_or_extend)(
9e18: e5941000 ldr r1, [r4] <== NOT EXECUTED
9e1c: e59f0048 ldr r0, [pc, #72] ; 9e6c <_Workspace_Handler_initialization+0x134><== NOT EXECUTED
9e20: e1a02007 mov r2, r7 <== NOT EXECUTED
9e24: e3a03008 mov r3, #8 <== NOT EXECUTED
bool unified = rtems_configuration_get_unified_work_area();
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9e28: e2866001 add r6, r6, #1 <== NOT EXECUTED
} else {
size = 0;
}
}
space_available = (*init_or_extend)(
9e2c: e1a0e00f mov lr, pc <== NOT EXECUTED
9e30: e12fff19 bx r9 <== NOT EXECUTED
bool unified = rtems_configuration_get_unified_work_area();
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9e34: e1560008 cmp r6, r8 <== NOT EXECUTED
remaining -= space_available;
} else {
remaining = 0;
}
init_or_extend = extend;
9e38: e59d9000 ldr r9, [sp] <== NOT EXECUTED
bool unified = rtems_configuration_get_unified_work_area();
uintptr_t page_size = CPU_HEAP_ALIGNMENT;
uintptr_t overhead = _Heap_Area_overhead( page_size );
size_t i;
for (i = 0; i < area_count; ++i) {
9e3c: e2844008 add r4, r4, #8 <== NOT EXECUTED
9e40: 1affffed bne 9dfc <_Workspace_Handler_initialization+0xc4> <== NOT EXECUTED
init_or_extend = extend;
}
}
if ( remaining > 0 ) {
9e44: e3570000 cmp r7, #0
9e48: 1a000001 bne 9e54 <_Workspace_Handler_initialization+0x11c>
INTERNAL_ERROR_CORE,
true,
INTERNAL_ERROR_TOO_LITTLE_WORKSPACE
);
}
}
9e4c: e28dd004 add sp, sp, #4
9e50: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc}
init_or_extend = extend;
}
}
if ( remaining > 0 ) {
_Internal_error_Occurred(
9e54: e3a00000 mov r0, #0
9e58: e3a01001 mov r1, #1
9e5c: e3a02002 mov r2, #2
9e60: ebfff6e7 bl 7a04 <_Internal_error_Occurred>
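_Workspace_Handler_initialization above satisfies the configured workspace size
from a list of heap areas, skipping areas too small to cover the per-area
overhead and stopping once nothing remains; running out of areas is a fatal
error. A simplified standalone sketch of that consumption loop (illustrative
constants, no heap initialization):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define OVERHEAD 22u   /* stand-in for the per-area heap overhead */

    struct area { uintptr_t size; };

    static bool satisfy(const struct area *areas, size_t count, uintptr_t remaining)
    {
      for (size_t i = 0; i < count && remaining > 0; ++i) {
        if (areas[i].size <= OVERHEAD)
          continue;                                  /* too small to be useful */

        uintptr_t usable = areas[i].size - OVERHEAD;
        uintptr_t taken  = usable < remaining ? usable : remaining;

        remaining -= taken;                          /* consume from this area */
      }
      return remaining == 0;                         /* false would be a fatal error */
    }

    int main(void)
    {
      struct area areas[] = { { 100 }, { 50 }, { 200 } };

      printf("satisfied: %d\n", satisfy(areas, 3, 250));   /* prints 1 */
      return 0;
    }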
0000f940 <rtems_event_system_receive>:
rtems_event_set event_in,
rtems_option option_set,
rtems_interval ticks,
rtems_event_set *event_out
)
{
f940: e92d4070 push {r4, r5, r6, lr}
rtems_status_code sc;
if ( event_out != NULL ) {
f944: e2535000 subs r5, r3, #0
rtems_event_set event_in,
rtems_option option_set,
rtems_interval ticks,
rtems_event_set *event_out
)
{
f948: e1a0c000 mov ip, r0
f94c: e24dd010 sub sp, sp, #16
} else {
*event_out = event->pending_events;
sc = RTEMS_SUCCESSFUL;
}
} else {
sc = RTEMS_INVALID_ADDRESS;
f950: 03a00009 moveq r0, #9
rtems_event_set *event_out
)
{
rtems_status_code sc;
if ( event_out != NULL ) {
f954: 0a000007 beq f978 <rtems_event_system_receive+0x38>
Thread_Control *executing = _Thread_Executing;
f958: e59f4058 ldr r4, [pc, #88] ; f9b8 <rtems_event_system_receive+0x78>
f95c: e5944008 ldr r4, [r4, #8]
RTEMS_API_Control *api = executing->API_Extensions[ THREAD_API_RTEMS ];
Event_Control *event = &api->System_event;
if ( !_Event_sets_Is_empty( event_in ) ) {
f960: e35c0000 cmp ip, #0
{
rtems_status_code sc;
if ( event_out != NULL ) {
Thread_Control *executing = _Thread_Executing;
RTEMS_API_Control *api = executing->API_Extensions[ THREAD_API_RTEMS ];
f964: e59460ec ldr r6, [r4, #236] ; 0xec
Event_Control *event = &api->System_event;
if ( !_Event_sets_Is_empty( event_in ) ) {
f968: 1a000004 bne f980 <rtems_event_system_receive+0x40>
);
_Thread_Enable_dispatch();
sc = executing->Wait.return_code;
} else {
*event_out = event->pending_events;
f96c: e5963004 ldr r3, [r6, #4] <== NOT EXECUTED
f970: e5853000 str r3, [r5] <== NOT EXECUTED
sc = RTEMS_SUCCESSFUL;
f974: e1a0000c mov r0, ip <== NOT EXECUTED
} else {
sc = RTEMS_INVALID_ADDRESS;
}
return sc;
}
f978: e28dd010 add sp, sp, #16
f97c: e8bd8070 pop {r4, r5, r6, pc}
*
 * This routine increments the thread dispatch level
*/
RTEMS_INLINE_ROUTINE uint32_t _Thread_Dispatch_increment_disable_level(void)
{
uint32_t level = _Thread_Dispatch_disable_level;
f980: e59fc034 ldr ip, [pc, #52] ; f9bc <rtems_event_system_receive+0x7c>
f984: e59ce000 ldr lr, [ip]
++level;
f988: e28ee001 add lr, lr, #1
_Thread_Dispatch_disable_level = level;
f98c: e58ce000 str lr, [ip]
RTEMS_API_Control *api = executing->API_Extensions[ THREAD_API_RTEMS ];
Event_Control *event = &api->System_event;
if ( !_Event_sets_Is_empty( event_in ) ) {
_Thread_Disable_dispatch();
_Event_Seize(
f990: e59fc028 ldr ip, [pc, #40] ; f9c0 <rtems_event_system_receive+0x80>
rtems_status_code sc;
if ( event_out != NULL ) {
Thread_Control *executing = _Thread_Executing;
RTEMS_API_Control *api = executing->API_Extensions[ THREAD_API_RTEMS ];
Event_Control *event = &api->System_event;
f994: e2866004 add r6, r6, #4
if ( !_Event_sets_Is_empty( event_in ) ) {
_Thread_Disable_dispatch();
_Event_Seize(
f998: e58dc008 str ip, [sp, #8]
f99c: e3a0c701 mov ip, #262144 ; 0x40000
f9a0: e58dc00c str ip, [sp, #12]
f9a4: e88d0050 stm sp, {r4, r6}
f9a8: ebffdeef bl 756c <_Event_Seize>
executing,
event,
&_System_event_Sync_state,
STATES_WAITING_FOR_SYSTEM_EVENT
);
_Thread_Enable_dispatch();
f9ac: ebffea0c bl a1e4 <_Thread_Enable_dispatch>
sc = executing->Wait.return_code;
f9b0: e5940034 ldr r0, [r4, #52] ; 0x34
f9b4: eaffffef b f978 <rtems_event_system_receive+0x38>
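rtems_event_system_receive above rejects a NULL output pointer, merely reports
the pending set when no events are requested, and otherwise seizes (and possibly
blocks on) the requested events. A simplified standalone sketch of that control
flow, with placeholder types and a non-blocking seize stub:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef uint32_t event_set;

    enum status { SUCCESSFUL = 0, INVALID_ADDRESS = 9 };

    static event_set pending = 0x5;          /* stand-in for event->pending_events */

    static enum status seize(event_set in, event_set *out)
    {
      *out = pending & in;                   /* the real code may block here */
      pending &= ~in;
      return SUCCESSFUL;
    }

    static enum status receive(event_set in, event_set *out)
    {
      if (out == NULL)
        return INVALID_ADDRESS;              /* mirrors the moveq r0, #9 path */

      if (in == 0) {                         /* nothing requested: just report */
        *out = pending;
        return SUCCESSFUL;
      }
      return seize(in, out);
    }

    int main(void)
    {
      event_set out;

      printf("status %d\n", receive(0, &out));     /* polls the pending events */
      printf("pending 0x%x\n", (unsigned) out);
      return 0;
    }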
000077a4 <rtems_rbheap_allocate>:
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
77a4: e92d45f0 push {r4, r5, r6, r7, r8, sl, lr}
void *ptr = NULL;
rtems_chain_control *free_chain = &control->free_chunk_chain;
rtems_rbtree_control *chunk_tree = &control->chunk_tree;
uintptr_t alignment = control->alignment;
77a8: e5906030 ldr r6, [r0, #48] ; 0x30
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
77ac: e1a05001 mov r5, r1
77b0: e1a04000 mov r4, r0
#include <stdlib.h>
static uintptr_t align_up(uintptr_t alignment, uintptr_t value)
{
uintptr_t excess = value % alignment;
77b4: e1a00001 mov r0, r1
77b8: e1a01006 mov r1, r6
77bc: eb00411f bl 17c40 <__umodsi3>
if (excess > 0) {
77c0: e3500000 cmp r0, #0
77c4: 01a06005 moveq r6, r5
77c8: 03a03001 moveq r3, #1
77cc: 0a000004 beq 77e4 <rtems_rbheap_allocate+0x40>
value += alignment - excess;
77d0: e0856006 add r6, r5, r6 <== NOT EXECUTED
77d4: e0606006 rsb r6, r0, r6 <== NOT EXECUTED
77d8: e1550006 cmp r5, r6 <== NOT EXECUTED
77dc: 83a03000 movhi r3, #0 <== NOT EXECUTED
77e0: 93a03001 movls r3, #1 <== NOT EXECUTED
rtems_chain_control *free_chain = &control->free_chunk_chain;
rtems_rbtree_control *chunk_tree = &control->chunk_tree;
uintptr_t alignment = control->alignment;
uintptr_t aligned_size = align_up(alignment, size);
if (size > 0 && size <= aligned_size) {
77e4: e3550000 cmp r5, #0
77e8: 03a05000 moveq r5, #0
77ec: 12035001 andne r5, r3, #1
77f0: e3550000 cmp r5, #0
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
void *ptr = NULL;
77f4: 01a00005 moveq r0, r5
rtems_chain_control *free_chain = &control->free_chunk_chain;
rtems_rbtree_control *chunk_tree = &control->chunk_tree;
uintptr_t alignment = control->alignment;
uintptr_t aligned_size = align_up(alignment, size);
if (size > 0 && size <= aligned_size) {
77f8: 08bd85f0 popeq {r4, r5, r6, r7, r8, sl, pc}
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_First(
Chain_Control *the_chain
)
{
return _Chain_Head( the_chain )->next;
77fc: e1a02004 mov r2, r4
7800: e4927004 ldr r7, [r2], #4
{
rtems_chain_node *current = rtems_chain_first(free_chain);
const rtems_chain_node *tail = rtems_chain_tail(free_chain);
rtems_rbheap_chunk *big_enough = NULL;
while (current != tail && big_enough == NULL) {
7804: e1570002 cmp r7, r2
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
void *ptr = NULL;
7808: 03a00000 moveq r0, #0
{
rtems_chain_node *current = rtems_chain_first(free_chain);
const rtems_chain_node *tail = rtems_chain_tail(free_chain);
rtems_rbheap_chunk *big_enough = NULL;
while (current != tail && big_enough == NULL) {
780c: 08bd85f0 popeq {r4, r5, r6, r7, r8, sl, pc}
rtems_rbheap_chunk *free_chunk = (rtems_rbheap_chunk *) current;
if (free_chunk->size >= size) {
7810: e597801c ldr r8, [r7, #28]
7814: e1560008 cmp r6, r8
7818: 8a00001a bhi 7888 <rtems_rbheap_allocate+0xe4>
uintptr_t aligned_size = align_up(alignment, size);
if (size > 0 && size <= aligned_size) {
rtems_rbheap_chunk *free_chunk = search_free_chunk(free_chain, aligned_size);
if (free_chunk != NULL) {
781c: e3570000 cmp r7, #0
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
void *ptr = NULL;
7820: 01a00007 moveq r0, r7
uintptr_t aligned_size = align_up(alignment, size);
if (size > 0 && size <= aligned_size) {
rtems_rbheap_chunk *free_chunk = search_free_chunk(free_chain, aligned_size);
if (free_chunk != NULL) {
7824: 08bd85f0 popeq {r4, r5, r6, r7, r8, sl, pc}
uintptr_t free_size = free_chunk->size;
if (free_size > aligned_size) {
7828: e1580006 cmp r8, r6
782c: 9a00001c bls 78a4 <rtems_rbheap_allocate+0x100>
*/
RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
const Chain_Control *the_chain
)
{
return _Chain_Immutable_head( the_chain )->next;
7830: e1a0a004 mov sl, r4
7834: e5ba500c ldr r5, [sl, #12]!
RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(
const Chain_Control *the_chain
)
{
return _Chain_Immutable_first( the_chain )
== _Chain_Immutable_tail( the_chain );
7838: e2843010 add r3, r4, #16
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected(
Chain_Control *the_chain
)
{
if ( !_Chain_Is_empty(the_chain))
783c: e1550003 cmp r5, r3
7840: 0a00001f beq 78c4 <rtems_rbheap_allocate+0x120>
Chain_Control *the_chain
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *old_first = head->next;
Chain_Node *new_first = old_first->next;
7844: e5953000 ldr r3, [r5]
head->next = new_first;
7848: e584300c str r3, [r4, #12]
new_first->previous = head;
784c: e583a004 str sl, [r3, #4]
if (new_chunk != NULL) {
uintptr_t new_free_size = free_size - aligned_size;
free_chunk->size = new_free_size;
new_chunk->begin = free_chunk->begin + new_free_size;
7850: e5972018 ldr r2, [r7, #24]
if (free_size > aligned_size) {
rtems_rbheap_chunk *new_chunk = get_chunk(control);
if (new_chunk != NULL) {
uintptr_t new_free_size = free_size - aligned_size;
7854: e0668008 rsb r8, r6, r8
*/
RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain(
Chain_Node *node
)
{
node->next = node->previous = NULL;
7858: e3a03000 mov r3, #0
free_chunk->size = new_free_size;
new_chunk->begin = free_chunk->begin + new_free_size;
785c: e0882002 add r2, r8, r2
7860: e1a01005 mov r1, r5
rtems_rbheap_chunk *new_chunk = get_chunk(control);
if (new_chunk != NULL) {
uintptr_t new_free_size = free_size - aligned_size;
free_chunk->size = new_free_size;
7864: e587801c str r8, [r7, #28]
new_chunk->begin = free_chunk->begin + new_free_size;
7868: e5852018 str r2, [r5, #24]
new_chunk->size = aligned_size;
786c: e585601c str r6, [r5, #28]
7870: e5853004 str r3, [r5, #4]
7874: e4813008 str r3, [r1], #8
static void insert_into_tree(
rtems_rbtree_control *tree,
rtems_rbheap_chunk *chunk
)
{
_RBTree_Insert_unprotected(tree, &chunk->tree_node);
7878: e2840018 add r0, r4, #24
787c: eb0006a7 bl 9320 <_RBTree_Insert_unprotected>
free_chunk->size = new_free_size;
new_chunk->begin = free_chunk->begin + new_free_size;
new_chunk->size = aligned_size;
rtems_chain_set_off_chain(&new_chunk->chain_node);
insert_into_tree(chunk_tree, new_chunk);
ptr = (void *) new_chunk->begin;
7880: e5950018 ldr r0, [r5, #24]
7884: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Next(
Chain_Node *the_node
)
{
return the_node->next;
7888: e5977000 ldr r7, [r7]
{
rtems_chain_node *current = rtems_chain_first(free_chain);
const rtems_chain_node *tail = rtems_chain_tail(free_chain);
rtems_rbheap_chunk *big_enough = NULL;
while (current != tail && big_enough == NULL) {
788c: e0523007 subs r3, r2, r7
7890: 13a03001 movne r3, #1
7894: e3530000 cmp r3, #0
7898: 1affffdc bne 7810 <rtems_rbheap_allocate+0x6c>
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
void *ptr = NULL;
789c: e1a00003 mov r0, r3
78a0: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
{
Chain_Node *next;
Chain_Node *previous;
next = the_node->next;
previous = the_node->previous;
78a4: e8970006 ldm r7, {r1, r2}
*/
RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain(
Chain_Node *node
)
{
node->next = node->previous = NULL;
78a8: e3a03000 mov r3, #0
Chain_Node *next;
Chain_Node *previous;
next = the_node->next;
previous = the_node->previous;
next->previous = previous;
78ac: e5812004 str r2, [r1, #4]
previous->next = next;
78b0: e5821000 str r1, [r2]
*/
RTEMS_INLINE_ROUTINE void _Chain_Set_off_chain(
Chain_Node *node
)
{
node->next = node->previous = NULL;
78b4: e5873004 str r3, [r7, #4]
78b8: e5873000 str r3, [r7]
ptr = (void *) new_chunk->begin;
}
} else {
rtems_chain_extract_unprotected(&free_chunk->chain_node);
rtems_chain_set_off_chain(&free_chunk->chain_node);
ptr = (void *) free_chunk->begin;
78bc: e5970018 ldr r0, [r7, #24]
78c0: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
{
rtems_chain_control *chain = &control->spare_descriptor_chain;
rtems_chain_node *chunk = rtems_chain_get_unprotected(chain);
if (chunk == NULL) {
(*control->extend_descriptors)(control);
78c4: e1a00004 mov r0, r4 <== NOT EXECUTED
78c8: e1a0e00f mov lr, pc <== NOT EXECUTED
78cc: e594f034 ldr pc, [r4, #52] ; 0x34 <== NOT EXECUTED
*/
RTEMS_INLINE_ROUTINE const Chain_Node *_Chain_Immutable_first(
const Chain_Control *the_chain
)
{
return _Chain_Immutable_head( the_chain )->next;
78d0: e594300c ldr r3, [r4, #12] <== NOT EXECUTED
*/
RTEMS_INLINE_ROUTINE Chain_Node *_Chain_Get_unprotected(
Chain_Control *the_chain
)
{
if ( !_Chain_Is_empty(the_chain))
78d4: e1550003 cmp r5, r3 <== NOT EXECUTED
78d8: 0a000004 beq 78f0 <rtems_rbheap_allocate+0x14c> <== NOT EXECUTED
Chain_Control *the_chain
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *old_first = head->next;
Chain_Node *new_first = old_first->next;
78dc: e5932000 ldr r2, [r3] <== NOT EXECUTED
head->next = new_first;
new_first->previous = head;
78e0: e1a05003 mov r5, r3 <== NOT EXECUTED
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *old_first = head->next;
Chain_Node *new_first = old_first->next;
head->next = new_first;
78e4: e584200c str r2, [r4, #12] <== NOT EXECUTED
new_first->previous = head;
78e8: e582a004 str sl, [r2, #4] <== NOT EXECUTED
78ec: eaffffd7 b 7850 <rtems_rbheap_allocate+0xac> <== NOT EXECUTED
return big_enough;
}
void *rtems_rbheap_allocate(rtems_rbheap_control *control, size_t size)
{
void *ptr = NULL;
78f0: e3a00000 mov r0, #0 <== NOT EXECUTED
}
}
}
return ptr;
}
78f4: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc} <== NOT EXECUTED
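rtems_rbheap_allocate above rounds the request up to the heap's alignment with
align_up() and then insists that size <= aligned_size, which rejects a request
whose rounded size wrapped around. A standalone sketch of the helper and the
check (illustrative values):

    #include <stdio.h>
    #include <stdint.h>

    static uintptr_t align_up(uintptr_t alignment, uintptr_t value)
    {
      uintptr_t excess = value % alignment;

      if (excess > 0)
        value += alignment - excess;         /* may wrap to a smaller value */

      return value;
    }

    int main(void)
    {
      uintptr_t size    = 100;
      uintptr_t aligned = align_up(32, size);

      /* A caller must check size > 0 && size <= aligned, as the listing does. */
      printf("aligned = %lu\n", (unsigned long) aligned);   /* prints 128 */
      return 0;
    }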
00007a6c <rtems_rbheap_extend_descriptors_with_malloc>:
void rtems_rbheap_extend_descriptors_with_malloc(rtems_rbheap_control *control)
{
7a6c: e92d4010 push {r4, lr} <== NOT EXECUTED
7a70: e1a04000 mov r4, r0 <== NOT EXECUTED
rtems_rbheap_chunk *chunk = malloc(sizeof(*chunk));
7a74: e3a00020 mov r0, #32 <== NOT EXECUTED
7a78: ebffed70 bl 3040 <malloc> <== NOT EXECUTED
if (chunk != NULL) {
7a7c: e3500000 cmp r0, #0 <== NOT EXECUTED
7a80: 08bd8010 popeq {r4, pc} <== NOT EXECUTED
)
{
Chain_Node *before_node;
the_node->previous = after_node;
before_node = after_node->next;
7a84: e594300c ldr r3, [r4, #12] <== NOT EXECUTED
RTEMS_INLINE_ROUTINE void _Chain_Prepend_unprotected(
Chain_Control *the_chain,
Chain_Node *the_node
)
{
_Chain_Insert_unprotected(_Chain_Head(the_chain), the_node);
7a88: e284200c add r2, r4, #12 <== NOT EXECUTED
Chain_Node *the_node
)
{
Chain_Node *before_node;
the_node->previous = after_node;
7a8c: e5802004 str r2, [r0, #4] <== NOT EXECUTED
before_node = after_node->next;
after_node->next = the_node;
7a90: e584000c str r0, [r4, #12] <== NOT EXECUTED
the_node->next = before_node;
before_node->previous = the_node;
7a94: e5830004 str r0, [r3, #4] <== NOT EXECUTED
Chain_Node *before_node;
the_node->previous = after_node;
before_node = after_node->next;
after_node->next = the_node;
the_node->next = before_node;
7a98: e5803000 str r3, [r0] <== NOT EXECUTED
7a9c: e8bd8010 pop {r4, pc} <== NOT EXECUTED
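rtems_rbheap_extend_descriptors_with_malloc above allocates one spare descriptor
and prepends it to the control's spare-descriptor chain by linking it in directly
behind the chain head. A simplified standalone sketch of that prepend, with
stand-in node and chain types:

    #include <stdio.h>
    #include <stdlib.h>

    struct node  { struct node *next; struct node *previous; };
    struct chain { struct node head; };

    static void prepend(struct chain *c, struct node *n)
    {
      struct node *before = c->head.next;

      n->previous      = &c->head;     /* the_node->previous    = after_node  */
      c->head.next     = n;            /* after_node->next      = the_node    */
      n->next          = before;       /* the_node->next        = before_node */
      before->previous = n;            /* before_node->previous = the_node    */
    }

    int main(void)
    {
      struct chain c    = { { NULL, NULL } };
      struct node  tail = { NULL, &c.head };

      c.head.next = &tail;             /* empty chain: head points at the tail */

      struct node *fresh = malloc(sizeof(*fresh));
      if (fresh != NULL)
        prepend(&c, fresh);

      printf("first node is the fresh one: %d\n", c.head.next == fresh);
      free(fresh);
      return 0;
    }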
0000f9c4 <rtems_task_mode>:
rtems_status_code rtems_task_mode(
rtems_mode mode_set,
rtems_mode mask,
rtems_mode *previous_mode_set
)
{
f9c4: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
ASR_Information *asr;
bool is_asr_enabled = false;
bool needs_asr_dispatching = false;
rtems_mode old_mode;
if ( !previous_mode_set )
f9c8: e2525000 subs r5, r2, #0
rtems_status_code rtems_task_mode(
rtems_mode mode_set,
rtems_mode mask,
rtems_mode *previous_mode_set
)
{
f9cc: e1a04000 mov r4, r0
f9d0: e1a06001 mov r6, r1
bool is_asr_enabled = false;
bool needs_asr_dispatching = false;
rtems_mode old_mode;
if ( !previous_mode_set )
return RTEMS_INVALID_ADDRESS;
f9d4: 03a00009 moveq r0, #9
ASR_Information *asr;
bool is_asr_enabled = false;
bool needs_asr_dispatching = false;
rtems_mode old_mode;
if ( !previous_mode_set )
f9d8: 08bd8ff0 popeq {r4, r5, r6, r7, r8, r9, sl, fp, pc}
return RTEMS_INVALID_ADDRESS;
executing = _Thread_Executing;
f9dc: e59f9148 ldr r9, [pc, #328] ; fb2c <rtems_task_mode+0x168>
f9e0: e5997008 ldr r7, [r9, #8]
api = executing->API_Extensions[ THREAD_API_RTEMS ];
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
f9e4: e5d7a070 ldrb sl, [r7, #112] ; 0x70
if ( !previous_mode_set )
return RTEMS_INVALID_ADDRESS;
executing = _Thread_Executing;
api = executing->API_Extensions[ THREAD_API_RTEMS ];
f9e8: e59780ec ldr r8, [r7, #236] ; 0xec
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
f9ec: e5973078 ldr r3, [r7, #120] ; 0x78
executing = _Thread_Executing;
api = executing->API_Extensions[ THREAD_API_RTEMS ];
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
f9f0: e35a0000 cmp sl, #0
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
f9f4: e5d8b008 ldrb fp, [r8, #8]
executing = _Thread_Executing;
api = executing->API_Extensions[ THREAD_API_RTEMS ];
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
f9f8: 03a0ac01 moveq sl, #256 ; 0x100
f9fc: 13a0a000 movne sl, #0
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
fa00: e3530000 cmp r3, #0
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
fa04: 138aac02 orrne sl, sl, #512 ; 0x200
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
fa08: e35b0000 cmp fp, #0
fa0c: 03a0bb01 moveq fp, #1024 ; 0x400
fa10: 13a0b000 movne fp, #0
old_mode |= _ISR_Get_level();
fa14: ebffeed4 bl b56c <_CPU_ISR_Get_level>
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
fa18: e18bb000 orr fp, fp, r0
old_mode |= _ISR_Get_level();
fa1c: e18ba00a orr sl, fp, sl
*previous_mode_set = old_mode;
/*
* These are generic thread scheduling characteristics.
*/
if ( mask & RTEMS_PREEMPT_MASK )
fa20: e3160c01 tst r6, #256 ; 0x100
old_mode |= RTEMS_TIMESLICE;
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
old_mode |= _ISR_Get_level();
*previous_mode_set = old_mode;
fa24: e585a000 str sl, [r5]
/*
* These are generic thread scheduling characteristics.
*/
if ( mask & RTEMS_PREEMPT_MASK )
fa28: 0a000003 beq fa3c <rtems_task_mode+0x78>
executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;
fa2c: e3140c01 tst r4, #256 ; 0x100
fa30: 13a03000 movne r3, #0
fa34: 03a03001 moveq r3, #1
fa38: e5c73070 strb r3, [r7, #112] ; 0x70
if ( mask & RTEMS_TIMESLICE_MASK ) {
fa3c: e3160c02 tst r6, #512 ; 0x200
fa40: 1a000028 bne fae8 <rtems_task_mode+0x124>
}
/*
* Set the new interrupt level
*/
if ( mask & RTEMS_INTERRUPT_MASK )
fa44: e3160080 tst r6, #128 ; 0x80
fa48: 1a00002f bne fb0c <rtems_task_mode+0x148>
* This is specific to the RTEMS API
*/
is_asr_enabled = false;
needs_asr_dispatching = false;
if ( mask & RTEMS_ASR_MASK ) {
fa4c: e2166b01 ands r6, r6, #1024 ; 0x400
fa50: 0a000012 beq faa0 <rtems_task_mode+0xdc>
#include <rtems/score/tod.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/apiext.h>
#include <rtems/score/sysstate.h>
rtems_status_code rtems_task_mode(
fa54: e3140b01 tst r4, #1024 ; 0x400
is_asr_enabled = false;
needs_asr_dispatching = false;
if ( mask & RTEMS_ASR_MASK ) {
is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
if ( is_asr_enabled != asr->is_enabled ) {
fa58: e5d82008 ldrb r2, [r8, #8]
#include <rtems/score/tod.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/apiext.h>
#include <rtems/score/sysstate.h>
rtems_status_code rtems_task_mode(
fa5c: 13a03000 movne r3, #0
fa60: 03a03001 moveq r3, #1
is_asr_enabled = false;
needs_asr_dispatching = false;
if ( mask & RTEMS_ASR_MASK ) {
is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
if ( is_asr_enabled != asr->is_enabled ) {
fa64: e1520003 cmp r2, r3
/*
* This is specific to the RTEMS API
*/
is_asr_enabled = false;
needs_asr_dispatching = false;
fa68: 03a06000 moveq r6, #0
if ( mask & RTEMS_ASR_MASK ) {
is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
if ( is_asr_enabled != asr->is_enabled ) {
fa6c: 0a00000b beq faa0 <rtems_task_mode+0xdc>
asr->is_enabled = is_asr_enabled;
fa70: e5c83008 strb r3, [r8, #8]
uint32_t level;
#if defined(ARM_MULTILIB_ARCH_V4)
uint32_t arm_switch_reg;
__asm__ volatile (
fa74: e10f3000 mrs r3, CPSR
fa78: e3832080 orr r2, r3, #128 ; 0x80
fa7c: e129f002 msr CPSR_fc, r2
{
rtems_signal_set _signals;
ISR_Level _level;
_ISR_Disable( _level );
_signals = information->signals_pending;
fa80: e5981018 ldr r1, [r8, #24]
information->signals_pending = information->signals_posted;
fa84: e5982014 ldr r2, [r8, #20]
information->signals_posted = _signals;
fa88: e5881014 str r1, [r8, #20]
rtems_signal_set _signals;
ISR_Level _level;
_ISR_Disable( _level );
_signals = information->signals_pending;
information->signals_pending = information->signals_posted;
fa8c: e5882018 str r2, [r8, #24]
static inline void arm_interrupt_enable( uint32_t level )
{
#if defined(ARM_MULTILIB_ARCH_V4)
ARM_SWITCH_REGISTERS;
__asm__ volatile (
fa90: e129f003 msr CPSR_fc, r3
_ASR_Swap_signals( asr );
if ( _ASR_Are_signals_pending( asr ) ) {
fa94: e5986014 ldr r6, [r8, #20]
fa98: e3560000 cmp r6, #0
/*
* This is specific to the RTEMS API
*/
is_asr_enabled = false;
needs_asr_dispatching = false;
fa9c: 13a06001 movne r6, #1
needs_asr_dispatching = true;
}
}
}
if ( _System_state_Is_up( _System_state_Get() ) ) {
faa0: e59f3088 ldr r3, [pc, #136] ; fb30 <rtems_task_mode+0x16c>
faa4: e5933000 ldr r3, [r3]
faa8: e3530003 cmp r3, #3
if (_Thread_Evaluate_is_dispatch_needed( needs_asr_dispatching ) )
_Thread_Dispatch();
}
return RTEMS_SUCCESSFUL;
faac: 13a00000 movne r0, #0
needs_asr_dispatching = true;
}
}
}
if ( _System_state_Is_up( _System_state_Get() ) ) {
fab0: 18bd8ff0 popne {r4, r5, r6, r7, r8, r9, sl, fp, pc}
{
Thread_Control *executing;
executing = _Thread_Executing;
if ( are_signals_pending ||
fab4: e3560000 cmp r6, #0
bool are_signals_pending
)
{
Thread_Control *executing;
executing = _Thread_Executing;
fab8: e5993008 ldr r3, [r9, #8]
if ( are_signals_pending ||
fabc: 1a000015 bne fb18 <rtems_task_mode+0x154>
fac0: e59f2064 ldr r2, [pc, #100] ; fb2c <rtems_task_mode+0x168>
fac4: e592200c ldr r2, [r2, #12]
fac8: e1530002 cmp r3, r2
if (_Thread_Evaluate_is_dispatch_needed( needs_asr_dispatching ) )
_Thread_Dispatch();
}
return RTEMS_SUCCESSFUL;
facc: 01a00006 moveq r0, r6
fad0: 08bd8ff0 popeq {r4, r5, r6, r7, r8, r9, sl, fp, pc}
(!_Thread_Is_heir( executing ) && executing->is_preemptible) ) {
fad4: e5d33070 ldrb r3, [r3, #112] ; 0x70
fad8: e3530000 cmp r3, #0
fadc: 1a00000d bne fb18 <rtems_task_mode+0x154>
fae0: e1a00006 mov r0, r6 <== NOT EXECUTED
}
fae4: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc} <== NOT EXECUTED
*/
if ( mask & RTEMS_PREEMPT_MASK )
executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;
if ( mask & RTEMS_TIMESLICE_MASK ) {
if ( _Modes_Is_timeslice(mode_set) ) {
fae8: e2143c02 ands r3, r4, #512 ; 0x200
executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
faec: 159f3040 ldrne r3, [pc, #64] ; fb34 <rtems_task_mode+0x170>
faf0: 15933000 ldrne r3, [r3]
if ( mask & RTEMS_PREEMPT_MASK )
executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;
if ( mask & RTEMS_TIMESLICE_MASK ) {
if ( _Modes_Is_timeslice(mode_set) ) {
executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
faf4: 13a02001 movne r2, #1
faf8: 15872078 strne r2, [r7, #120] ; 0x78
executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
fafc: 15873074 strne r3, [r7, #116] ; 0x74
} else
executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
fb00: 05873078 streq r3, [r7, #120] ; 0x78
}
/*
* Set the new interrupt level
*/
if ( mask & RTEMS_INTERRUPT_MASK )
fb04: e3160080 tst r6, #128 ; 0x80
fb08: 0affffcf beq fa4c <rtems_task_mode+0x88>
*/
RTEMS_INLINE_ROUTINE void _Modes_Set_interrupt_level (
Modes_Control mode_set
)
{
_ISR_Set_level( _Modes_Get_interrupt_level( mode_set ) );
fb0c: e2040080 and r0, r4, #128 ; 0x80
fb10: ebffee90 bl b558 <_CPU_ISR_Set_level>
fb14: eaffffcc b fa4c <rtems_task_mode+0x88>
_Thread_Dispatch_necessary = true;
fb18: e3a03001 mov r3, #1
fb1c: e5c93004 strb r3, [r9, #4]
}
}
if ( _System_state_Is_up( _System_state_Get() ) ) {
if (_Thread_Evaluate_is_dispatch_needed( needs_asr_dispatching ) )
_Thread_Dispatch();
fb20: eb000287 bl 10544 <_Thread_Dispatch>
}
return RTEMS_SUCCESSFUL;
fb24: e3a00000 mov r0, #0
fb28: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc}
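The mask/mode split visible above is how rtems_task_mode is normally used: only
the bits named in the mask change, everything else keeps its current setting,
and the previous mode is always written back. A minimal usage sketch (error
handling omitted; assumes the classic API constants from <rtems.h>):

    #include <rtems.h>

    void enter_critical_region(void)
    {
      rtems_mode previous_mode;

      /* Disable preemption only; timeslicing, ASR and interrupt level keep
       * their current settings because they are not in the mask. */
      (void) rtems_task_mode(RTEMS_NO_PREEMPT, RTEMS_PREEMPT_MASK, &previous_mode);

      /* ... critical work ... */

      /* Restore whatever the previous preemption setting was. */
      (void) rtems_task_mode(previous_mode, RTEMS_PREEMPT_MASK, &previous_mode);
    }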