RTEMS 4.11 Annotated Report
Wed Jan 26 15:40:55 2011
00014f98 <_CORE_message_queue_Broadcast>:
{
Thread_Control *the_thread;
uint32_t number_broadcasted;
Thread_Wait_information *waitp;
if ( size > the_message_queue->maximum_message_size ) {
14f98: e590304c ldr r3, [r0, #76] ; 0x4c
Objects_Id id __attribute__((unused)),
CORE_message_queue_API_mp_support_callout api_message_queue_mp_support __attribute__((unused)),
#endif
uint32_t *count
)
{
14f9c: e92d45f0 push {r4, r5, r6, r7, r8, sl, lr}
Thread_Control *the_thread;
uint32_t number_broadcasted;
Thread_Wait_information *waitp;
if ( size > the_message_queue->maximum_message_size ) {
14fa0: e1520003 cmp r2, r3
Objects_Id id __attribute__((unused)),
CORE_message_queue_API_mp_support_callout api_message_queue_mp_support __attribute__((unused)),
#endif
uint32_t *count
)
{
14fa4: e1a06000 mov r6, r0
14fa8: e1a0a001 mov sl, r1
14fac: e1a07002 mov r7, r2
14fb0: e59d8020 ldr r8, [sp, #32]
Thread_Control *the_thread;
uint32_t number_broadcasted;
Thread_Wait_information *waitp;
if ( size > the_message_queue->maximum_message_size ) {
14fb4: 8a000013 bhi 15008 <_CORE_message_queue_Broadcast+0x70>
* NOTE: This check is critical because threads can block on
* send and receive and this ensures that we are broadcasting
* the message to threads waiting to receive -- not to send.
*/
if ( the_message_queue->number_of_pending_messages != 0 ) {
14fb8: e5905048 ldr r5, [r0, #72] ; 0x48
14fbc: e3550000 cmp r5, #0
*count = 0;
14fc0: 13a00000 movne r0, #0
14fc4: 15880000 strne r0, [r8]
* NOTE: This check is critical because threads can block on
* send and receive and this ensures that we are broadcasting
* the message to threads waiting to receive -- not to send.
*/
if ( the_message_queue->number_of_pending_messages != 0 ) {
14fc8: 0a000007 beq 14fec <_CORE_message_queue_Broadcast+0x54>
14fcc: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
const void *source,
void *destination,
size_t size
)
{
memcpy(destination, source, size);
14fd0: e594002c ldr r0, [r4, #44] ; 0x2c
14fd4: e1a0100a mov r1, sl
14fd8: e1a02007 mov r2, r7
14fdc: eb00221b bl 1d850 <memcpy>
buffer,
waitp->return_argument_second.mutable_object,
size
);
*(size_t *) the_thread->Wait.return_argument = size;
14fe0: e5943028 ldr r3, [r4, #40] ; 0x28
14fe4: e5837000 str r7, [r3]
*/
number_broadcasted = 0;
while ((the_thread =
_Thread_queue_Dequeue(&the_message_queue->Wait_queue))) {
waitp = &the_thread->Wait;
number_broadcasted += 1;
14fe8: e2855001 add r5, r5, #1
/*
* There must be no pending messages if there is a thread waiting to
* receive a message.
*/
number_broadcasted = 0;
while ((the_thread =
14fec: e1a00006 mov r0, r6
14ff0: eb000a7e bl 179f0 <_Thread_queue_Dequeue>
14ff4: e2504000 subs r4, r0, #0
14ff8: 1afffff4 bne 14fd0 <_CORE_message_queue_Broadcast+0x38>
if ( !_Objects_Is_local_id( the_thread->Object.id ) )
(*api_message_queue_mp_support) ( the_thread, id );
#endif
}
*count = number_broadcasted;
14ffc: e5885000 str r5, [r8]
return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
15000: e1a00004 mov r0, r4
15004: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc}
Thread_Control *the_thread;
uint32_t number_broadcasted;
Thread_Wait_information *waitp;
if ( size > the_message_queue->maximum_message_size ) {
return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
15008: e3a00001 mov r0, #1 <== NOT EXECUTED
#endif
}
*count = number_broadcasted;
return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
}
1500c: e8bd85f0 pop {r4, r5, r6, r7, r8, sl, pc} <== NOT EXECUTED
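The routine above dequeues every thread blocked on a receive, copies the buffer to each one, and reports the number of satisfied receivers through *count. A minimal sketch of how this path is reached from the Classic API, assuming a message queue created elsewhere (the identifier and message contents below are illustrative):

#include <rtems.h>
#include <stdint.h>

/* Illustrative only: broadcast a short message to every task currently
   blocked on a receive on 'queue_id'. */
rtems_status_code broadcast_example(rtems_id queue_id)
{
  const char msg[] = "wake";
  uint32_t   count = 0;

  /* rtems_message_queue_broadcast() ends up in
     _CORE_message_queue_Broadcast(); 'count' receives the number of
     previously blocked receivers that were handed the message. */
  return rtems_message_queue_broadcast(queue_id, msg, sizeof(msg), &count);
}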
000070e8 <_Heap_Walk>:
bool _Heap_Walk(
Heap_Control *heap,
int source,
bool dump
)
{
70e8: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
70ec: e5903014 ldr r3, [r0, #20]
bool _Heap_Walk(
Heap_Control *heap,
int source,
bool dump
)
{
70f0: e24dd030 sub sp, sp, #48 ; 0x30
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
70f4: e58d3024 str r3, [sp, #36] ; 0x24
Heap_Block *const first_block = heap->first_block;
Heap_Block *const last_block = heap->last_block;
70f8: e5903024 ldr r3, [r0, #36] ; 0x24
Heap_Block *block = first_block;
Heap_Walk_printer printer = dump ?
_Heap_Walk_print : _Heap_Walk_print_nothing;
70fc: e59f4500 ldr r4, [pc, #1280] ; 7604 <_Heap_Walk+0x51c>
)
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
Heap_Block *const first_block = heap->first_block;
Heap_Block *const last_block = heap->last_block;
7100: e58d3028 str r3, [sp, #40] ; 0x28
Heap_Block *block = first_block;
Heap_Walk_printer printer = dump ?
_Heap_Walk_print : _Heap_Walk_print_nothing;
7104: e59f34fc ldr r3, [pc, #1276] ; 7608 <_Heap_Walk+0x520>
7108: e31200ff tst r2, #255 ; 0xff
710c: 11a04003 movne r4, r3
if ( !_System_state_Is_up( _System_state_Get() ) ) {
7110: e59f34f4 ldr r3, [pc, #1268] ; 760c <_Heap_Walk+0x524>
7114: e5933000 ldr r3, [r3]
bool dump
)
{
uintptr_t const page_size = heap->page_size;
uintptr_t const min_block_size = heap->min_block_size;
Heap_Block *const first_block = heap->first_block;
7118: e590c020 ldr ip, [r0, #32]
Heap_Block *const last_block = heap->last_block;
Heap_Block *block = first_block;
Heap_Walk_printer printer = dump ?
_Heap_Walk_print : _Heap_Walk_print_nothing;
if ( !_System_state_Is_up( _System_state_Get() ) ) {
711c: e3530003 cmp r3, #3
bool _Heap_Walk(
Heap_Control *heap,
int source,
bool dump
)
{
7120: e1a06000 mov r6, r0
7124: e1a05001 mov r5, r1
uintptr_t const page_size = heap->page_size;
7128: e5909010 ldr r9, [r0, #16]
uintptr_t const min_block_size = heap->min_block_size;
Heap_Block *const first_block = heap->first_block;
712c: e58dc020 str ip, [sp, #32]
Heap_Block *const last_block = heap->last_block;
Heap_Block *block = first_block;
Heap_Walk_printer printer = dump ?
_Heap_Walk_print : _Heap_Walk_print_nothing;
if ( !_System_state_Is_up( _System_state_Get() ) ) {
7130: 1a000127 bne 75d4 <_Heap_Walk+0x4ec>
Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
Heap_Block *const first_block = heap->first_block;
Heap_Block *const last_block = heap->last_block;
(*printer)(
7134: e59dc024 ldr ip, [sp, #36] ; 0x24
7138: e58dc000 str ip, [sp]
713c: e5903018 ldr r3, [r0, #24]
7140: e58d3004 str r3, [sp, #4]
7144: e590301c ldr r3, [r0, #28]
7148: e59d2020 ldr r2, [sp, #32]
714c: e58d3008 str r3, [sp, #8]
7150: e59d3028 ldr r3, [sp, #40] ; 0x28
7154: e58d200c str r2, [sp, #12]
7158: e58d3010 str r3, [sp, #16]
715c: e5903008 ldr r3, [r0, #8]
7160: e58d3014 str r3, [sp, #20]
7164: e590300c ldr r3, [r0, #12]
7168: e59f24a0 ldr r2, [pc, #1184] ; 7610 <_Heap_Walk+0x528>
716c: e58d3018 str r3, [sp, #24]
7170: e1a00001 mov r0, r1
7174: e1a03009 mov r3, r9
7178: e3a01000 mov r1, #0
717c: e1a0e00f mov lr, pc
7180: e12fff14 bx r4
heap->area_begin, heap->area_end,
first_block, last_block,
first_free_block, last_free_block
);
if ( page_size == 0 ) {
7184: e3590000 cmp r9, #0
7188: 1a000006 bne 71a8 <_Heap_Walk+0xc0>
(*printer)( source, true, "page size is zero\n" );
718c: e1a00005 mov r0, r5
7190: e3a01001 mov r1, #1
7194: e59f2478 ldr r2, [pc, #1144] ; 7614 <_Heap_Walk+0x52c>
7198: e1a0e00f mov lr, pc
719c: e12fff14 bx r4
if ( !_System_state_Is_up( _System_state_Get() ) ) {
return true;
}
if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
return false;
71a0: e1a08009 mov r8, r9
71a4: ea00010b b 75d8 <_Heap_Walk+0x4f0>
(*printer)( source, true, "page size is zero\n" );
return false;
}
if ( !_Addresses_Is_aligned( (void *) page_size ) ) {
71a8: e2198007 ands r8, r9, #7
(*printer)(
71ac: 11a00005 movne r0, r5
71b0: 13a01001 movne r1, #1
71b4: 159f245c ldrne r2, [pc, #1116] ; 7618 <_Heap_Walk+0x530>
71b8: 11a03009 movne r3, r9
71bc: 1a00010c bne 75f4 <_Heap_Walk+0x50c>
RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
uintptr_t value,
uintptr_t alignment
)
{
return (value % alignment) == 0;
71c0: e59d0024 ldr r0, [sp, #36] ; 0x24
71c4: e1a01009 mov r1, r9
71c8: ebffe785 bl fe4 <__umodsi3>
);
return false;
}
if ( !_Heap_Is_aligned( min_block_size, page_size ) ) {
71cc: e250b000 subs fp, r0, #0
71d0: 0a000006 beq 71f0 <_Heap_Walk+0x108>
(*printer)(
71d4: e1a00005 mov r0, r5
71d8: e3a01001 mov r1, #1
71dc: e59f2438 ldr r2, [pc, #1080] ; 761c <_Heap_Walk+0x534>
71e0: e59d3024 ldr r3, [sp, #36] ; 0x24
71e4: e1a0e00f mov lr, pc
71e8: e12fff14 bx r4
71ec: ea0000f9 b 75d8 <_Heap_Walk+0x4f0>
71f0: e59dc020 ldr ip, [sp, #32]
71f4: e1a01009 mov r1, r9
71f8: e28c0008 add r0, ip, #8
71fc: ebffe778 bl fe4 <__umodsi3>
);
return false;
}
if (
7200: e250a000 subs sl, r0, #0
!_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
) {
(*printer)(
7204: 11a00005 movne r0, r5
7208: 13a01001 movne r1, #1
720c: 159f240c ldrne r2, [pc, #1036] ; 7620 <_Heap_Walk+0x538>
7210: 159d3020 ldrne r3, [sp, #32]
7214: 1a0000cc bne 754c <_Heap_Walk+0x464>
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
7218: e59d2020 ldr r2, [sp, #32]
721c: e5928004 ldr r8, [r2, #4]
);
return false;
}
if ( !_Heap_Is_prev_used( first_block ) ) {
7220: e2188001 ands r8, r8, #1
(*printer)(
7224: 01a00005 moveq r0, r5
7228: 03a01001 moveq r1, #1
722c: 059f23f0 ldreq r2, [pc, #1008] ; 7624 <_Heap_Walk+0x53c>
7230: 0a000009 beq 725c <_Heap_Walk+0x174>
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
7234: e59d3028 ldr r3, [sp, #40] ; 0x28
7238: e5937004 ldr r7, [r3, #4]
723c: e3c77001 bic r7, r7, #1
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
7240: e0837007 add r7, r3, r7
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
7244: e5978004 ldr r8, [r7, #4]
);
return false;
}
if ( _Heap_Is_free( last_block ) ) {
7248: e2188001 ands r8, r8, #1
724c: 1a000005 bne 7268 <_Heap_Walk+0x180>
(*printer)(
7250: e59f23d0 ldr r2, [pc, #976] ; 7628 <_Heap_Walk+0x540>
7254: e1a00005 mov r0, r5
7258: e3a01001 mov r1, #1
725c: e1a0e00f mov lr, pc
7260: e12fff14 bx r4
7264: ea0000db b 75d8 <_Heap_Walk+0x4f0>
);
return false;
}
if (
7268: e59dc020 ldr ip, [sp, #32]
726c: e157000c cmp r7, ip
7270: 0a000006 beq 7290 <_Heap_Walk+0x1a8>
_Heap_Block_at( last_block, _Heap_Block_size( last_block ) ) != first_block
) {
(*printer)(
7274: e1a00005 mov r0, r5 <== NOT EXECUTED
7278: e3a01001 mov r1, #1 <== NOT EXECUTED
727c: e59f23a8 ldr r2, [pc, #936] ; 762c <_Heap_Walk+0x544> <== NOT EXECUTED
7280: e1a0e00f mov lr, pc <== NOT EXECUTED
7284: e12fff14 bx r4 <== NOT EXECUTED
if ( !_System_state_Is_up( _System_state_Get() ) ) {
return true;
}
if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
return false;
7288: e1a0800a mov r8, sl <== NOT EXECUTED
728c: ea0000d1 b 75d8 <_Heap_Walk+0x4f0> <== NOT EXECUTED
int source,
Heap_Walk_printer printer,
Heap_Control *heap
)
{
uintptr_t const page_size = heap->page_size;
7290: e596b010 ldr fp, [r6, #16]
block = next_block;
} while ( block != first_block );
return true;
}
7294: e5968008 ldr r8, [r6, #8]
Heap_Walk_printer printer,
Heap_Control *heap
)
{
uintptr_t const page_size = heap->page_size;
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
7298: e1a0a006 mov sl, r6
729c: ea000034 b 7374 <_Heap_Walk+0x28c>
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
72a0: e5963020 ldr r3, [r6, #32]
72a4: e1530008 cmp r3, r8
72a8: 83a0c000 movhi ip, #0
72ac: 8a000003 bhi 72c0 <_Heap_Walk+0x1d8>
72b0: e596c024 ldr ip, [r6, #36] ; 0x24
72b4: e15c0008 cmp ip, r8
72b8: 33a0c000 movcc ip, #0
72bc: 23a0c001 movcs ip, #1
const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
const Heap_Block *prev_block = free_list_tail;
const Heap_Block *free_block = first_free_block;
while ( free_block != free_list_tail ) {
if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
72c0: e21cc0ff ands ip, ip, #255 ; 0xff
(*printer)(
72c4: 01a00005 moveq r0, r5
72c8: 03a01001 moveq r1, #1
72cc: 059f235c ldreq r2, [pc, #860] ; 7630 <_Heap_Walk+0x548>
72d0: 0a000012 beq 7320 <_Heap_Walk+0x238>
RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
uintptr_t value,
uintptr_t alignment
)
{
return (value % alignment) == 0;
72d4: e2880008 add r0, r8, #8
72d8: e1a0100b mov r1, fp
72dc: ebffe740 bl fe4 <__umodsi3>
);
return false;
}
if (
72e0: e250c000 subs ip, r0, #0
!_Heap_Is_aligned( _Heap_Alloc_area_of_block( free_block ), page_size )
) {
(*printer)(
72e4: 11a00005 movne r0, r5
72e8: 13a01001 movne r1, #1
72ec: 159f2340 ldrne r2, [pc, #832] ; 7634 <_Heap_Walk+0x54c>
72f0: 11a03008 movne r3, r8
72f4: 1a0000be bne 75f4 <_Heap_Walk+0x50c>
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
72f8: e5983004 ldr r3, [r8, #4]
72fc: e3c33001 bic r3, r3, #1
block = next_block;
} while ( block != first_block );
return true;
}
7300: e0883003 add r3, r8, r3
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
7304: e5933004 ldr r3, [r3, #4]
);
return false;
}
if ( _Heap_Is_used( free_block ) ) {
7308: e2133001 ands r3, r3, #1
730c: e58d302c str r3, [sp, #44] ; 0x2c
7310: 0a000009 beq 733c <_Heap_Walk+0x254>
(*printer)(
7314: e59f231c ldr r2, [pc, #796] ; 7638 <_Heap_Walk+0x550>
7318: e1a00005 mov r0, r5
731c: e3a01001 mov r1, #1
7320: e1a03008 mov r3, r8
7324: e58dc01c str ip, [sp, #28]
7328: e1a0e00f mov lr, pc
732c: e12fff14 bx r4
if ( !_System_state_Is_up( _System_state_Get() ) ) {
return true;
}
if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
return false;
7330: e59dc01c ldr ip, [sp, #28]
7334: e1a0800c mov r8, ip
7338: ea0000a6 b 75d8 <_Heap_Walk+0x4f0>
);
return false;
}
if ( free_block->prev != prev_block ) {
733c: e598300c ldr r3, [r8, #12]
7340: e153000a cmp r3, sl
7344: 0a000008 beq 736c <_Heap_Walk+0x284>
(*printer)(
7348: e58d3000 str r3, [sp]
734c: e1a00005 mov r0, r5
7350: e1a03008 mov r3, r8
7354: e3a01001 mov r1, #1
7358: e59f22dc ldr r2, [pc, #732] ; 763c <_Heap_Walk+0x554>
735c: e1a0e00f mov lr, pc
7360: e12fff14 bx r4
if ( !_System_state_Is_up( _System_state_Get() ) ) {
return true;
}
if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
return false;
7364: e59d802c ldr r8, [sp, #44] ; 0x2c
7368: ea00009a b 75d8 <_Heap_Walk+0x4f0>
return false;
}
prev_block = free_block;
free_block = free_block->next;
736c: e1a0a008 mov sl, r8
7370: e5988008 ldr r8, [r8, #8]
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
const Heap_Block *prev_block = free_list_tail;
const Heap_Block *free_block = first_free_block;
while ( free_block != free_list_tail ) {
7374: e1580006 cmp r8, r6
7378: 1affffc8 bne 72a0 <_Heap_Walk+0x1b8>
737c: ea000000 b 7384 <_Heap_Walk+0x29c>
block->prev_size
);
}
block = next_block;
} while ( block != first_block );
7380: e1a07008 mov r7, r8
return true;
}
7384: e5973004 ldr r3, [r7, #4]
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
7388: e5962020 ldr r2, [r6, #32]
- HEAP_BLOCK_HEADER_SIZE);
}
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
{
return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
738c: e3c3a001 bic sl, r3, #1
RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
const Heap_Block *block,
uintptr_t offset
)
{
return (Heap_Block *) ((uintptr_t) block + offset);
7390: e087800a add r8, r7, sl
const Heap_Control *heap,
const Heap_Block *block
)
{
return (uintptr_t) block >= (uintptr_t) heap->first_block
&& (uintptr_t) block <= (uintptr_t) heap->last_block;
7394: e1520008 cmp r2, r8
7398: 83a0b000 movhi fp, #0
739c: 8a000003 bhi 73b0 <_Heap_Walk+0x2c8>
73a0: e596b024 ldr fp, [r6, #36] ; 0x24
73a4: e15b0008 cmp fp, r8
73a8: 33a0b000 movcc fp, #0
73ac: 23a0b001 movcs fp, #1
bool const prev_used = _Heap_Is_prev_used( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
uintptr_t const next_block_begin = (uintptr_t) next_block;
bool const is_not_last_block = block != last_block;
if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
73b0: e21bb0ff ands fp, fp, #255 ; 0xff
73b4: 1a000007 bne 73d8 <_Heap_Walk+0x2f0>
(*printer)(
73b8: e58d8000 str r8, [sp]
73bc: e1a00005 mov r0, r5
73c0: e3a01001 mov r1, #1
73c4: e59f2274 ldr r2, [pc, #628] ; 7640 <_Heap_Walk+0x558>
73c8: e1a03007 mov r3, r7
73cc: e1a0e00f mov lr, pc
73d0: e12fff14 bx r4
73d4: ea00005e b 7554 <_Heap_Walk+0x46c>
uintptr_t const block_begin = (uintptr_t) block;
uintptr_t const block_size = _Heap_Block_size( block );
bool const prev_used = _Heap_Is_prev_used( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
uintptr_t const next_block_begin = (uintptr_t) next_block;
bool const is_not_last_block = block != last_block;
73d8: e59d2028 ldr r2, [sp, #40] ; 0x28
RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
uintptr_t value,
uintptr_t alignment
)
{
return (value % alignment) == 0;
73dc: e1a0000a mov r0, sl
73e0: e1a01009 mov r1, r9
73e4: e057b002 subs fp, r7, r2
73e8: 13a0b001 movne fp, #1
73ec: e58d301c str r3, [sp, #28]
73f0: ebffe6fb bl fe4 <__umodsi3>
);
return false;
}
if ( !_Heap_Is_aligned( block_size, page_size ) && is_not_last_block ) {
73f4: e3500000 cmp r0, #0
73f8: e59d301c ldr r3, [sp, #28]
73fc: 0a000005 beq 7418 <_Heap_Walk+0x330>
7400: e35b0000 cmp fp, #0
(*printer)(
7404: 158da000 strne sl, [sp]
7408: 11a00005 movne r0, r5
740c: 13a01001 movne r1, #1
7410: 159f222c ldrne r2, [pc, #556] ; 7644 <_Heap_Walk+0x55c>
7414: 1a000014 bne 746c <_Heap_Walk+0x384>
);
return false;
}
if ( block_size < min_block_size && is_not_last_block ) {
7418: e59dc024 ldr ip, [sp, #36] ; 0x24
741c: e15a000c cmp sl, ip
7420: 2a000009 bcs 744c <_Heap_Walk+0x364>
7424: e35b0000 cmp fp, #0
7428: 0a000007 beq 744c <_Heap_Walk+0x364>
(*printer)(
742c: e88d1400 stm sp, {sl, ip}
7430: e1a00005 mov r0, r5
7434: e3a01001 mov r1, #1
7438: e59f2208 ldr r2, [pc, #520] ; 7648 <_Heap_Walk+0x560>
743c: e1a03007 mov r3, r7
7440: e1a0e00f mov lr, pc
7444: e12fff14 bx r4
7448: ea00006b b 75fc <_Heap_Walk+0x514>
);
return false;
}
if ( next_block_begin <= block_begin && is_not_last_block ) {
744c: e1580007 cmp r8, r7
7450: 8a000009 bhi 747c <_Heap_Walk+0x394>
7454: e35b0000 cmp fp, #0
7458: 0a000007 beq 747c <_Heap_Walk+0x394>
(*printer)(
745c: e58d8000 str r8, [sp]
7460: e59f21e4 ldr r2, [pc, #484] ; 764c <_Heap_Walk+0x564>
7464: e1a00005 mov r0, r5
7468: e3a01001 mov r1, #1
746c: e1a03007 mov r3, r7
7470: e1a0e00f mov lr, pc
7474: e12fff14 bx r4
7478: ea00005f b 75fc <_Heap_Walk+0x514>
block->size_and_flag = size | flag;
}
RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
{
return block->size_and_flag & HEAP_PREV_BLOCK_USED;
747c: e203b001 and fp, r3, #1
7480: e5983004 ldr r3, [r8, #4]
);
return false;
}
if ( !_Heap_Is_prev_used( next_block ) ) {
7484: e3130001 tst r3, #1
7488: 1a00003b bne 757c <_Heap_Walk+0x494>
false,
"block 0x%08x: size %u, prev 0x%08x%s, next 0x%08x%s\n",
block,
block_size,
block->prev,
block->prev == first_free_block ?
748c: e597200c ldr r2, [r7, #12]
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
bool const prev_used = _Heap_Is_prev_used( block );
uintptr_t const block_size = _Heap_Block_size( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
(*printer)(
7490: e5963008 ldr r3, [r6, #8]
7494: e1520003 cmp r2, r3
block = next_block;
} while ( block != first_block );
return true;
}
7498: e596100c ldr r1, [r6, #12]
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
bool const prev_used = _Heap_Is_prev_used( block );
uintptr_t const block_size = _Heap_Block_size( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
(*printer)(
749c: 059f01ac ldreq r0, [pc, #428] ; 7650 <_Heap_Walk+0x568>
74a0: 0a000003 beq 74b4 <_Heap_Walk+0x3cc>
block,
block_size,
block->prev,
block->prev == first_free_block ?
" (= first free)"
: (block->prev == free_list_head ? " (= head)" : ""),
74a4: e59f31a8 ldr r3, [pc, #424] ; 7654 <_Heap_Walk+0x56c>
74a8: e1520006 cmp r2, r6
74ac: e59f01a4 ldr r0, [pc, #420] ; 7658 <_Heap_Walk+0x570>
74b0: 01a00003 moveq r0, r3
block->next,
block->next == last_free_block ?
74b4: e5973008 ldr r3, [r7, #8]
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
bool const prev_used = _Heap_Is_prev_used( block );
uintptr_t const block_size = _Heap_Block_size( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
(*printer)(
74b8: e1530001 cmp r3, r1
74bc: 059f1198 ldreq r1, [pc, #408] ; 765c <_Heap_Walk+0x574>
74c0: 0a000003 beq 74d4 <_Heap_Walk+0x3ec>
" (= first free)"
: (block->prev == free_list_head ? " (= head)" : ""),
block->next,
block->next == last_free_block ?
" (= last free)"
: (block->next == free_list_tail ? " (= tail)" : "")
74c4: e59fc194 ldr ip, [pc, #404] ; 7660 <_Heap_Walk+0x578>
74c8: e1530006 cmp r3, r6
74cc: e59f1184 ldr r1, [pc, #388] ; 7658 <_Heap_Walk+0x570>
74d0: 01a0100c moveq r1, ip
Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
bool const prev_used = _Heap_Is_prev_used( block );
uintptr_t const block_size = _Heap_Block_size( block );
Heap_Block *const next_block = _Heap_Block_at( block, block_size );
(*printer)(
74d4: e58d2004 str r2, [sp, #4]
74d8: e58d0008 str r0, [sp, #8]
74dc: e58d300c str r3, [sp, #12]
74e0: e58d1010 str r1, [sp, #16]
74e4: e1a03007 mov r3, r7
74e8: e58da000 str sl, [sp]
74ec: e1a00005 mov r0, r5
74f0: e3a01000 mov r1, #0
74f4: e59f2168 ldr r2, [pc, #360] ; 7664 <_Heap_Walk+0x57c>
74f8: e1a0e00f mov lr, pc
74fc: e12fff14 bx r4
block->next == last_free_block ?
" (= last free)"
: (block->next == free_list_tail ? " (= tail)" : "")
);
if ( block_size != next_block->prev_size ) {
7500: e5983000 ldr r3, [r8]
7504: e15a0003 cmp sl, r3
7508: 0a000009 beq 7534 <_Heap_Walk+0x44c>
(*printer)(
750c: e58d3004 str r3, [sp, #4]
7510: e58da000 str sl, [sp]
7514: e58d8008 str r8, [sp, #8]
7518: e1a00005 mov r0, r5
751c: e3a01001 mov r1, #1
7520: e59f2140 ldr r2, [pc, #320] ; 7668 <_Heap_Walk+0x580>
7524: e1a03007 mov r3, r7
7528: e1a0e00f mov lr, pc
752c: e12fff14 bx r4
7530: ea000031 b 75fc <_Heap_Walk+0x514>
);
return false;
}
if ( !prev_used ) {
7534: e35b0000 cmp fp, #0
7538: 1a000007 bne 755c <_Heap_Walk+0x474>
(*printer)(
753c: e59f2128 ldr r2, [pc, #296] ; 766c <_Heap_Walk+0x584>
7540: e1a00005 mov r0, r5
7544: e3a01001 mov r1, #1
7548: e1a03007 mov r3, r7
754c: e1a0e00f mov lr, pc
7550: e12fff14 bx r4
return false;
}
if ( !_Heap_Is_prev_used( next_block ) ) {
if ( !_Heap_Walk_check_free_block( source, printer, heap, block ) ) {
return false;
7554: e1a0800b mov r8, fp
7558: ea00001e b 75d8 <_Heap_Walk+0x4f0>
block = next_block;
} while ( block != first_block );
return true;
}
755c: e5963008 ldr r3, [r6, #8]
7560: ea000002 b 7570 <_Heap_Walk+0x488>
{
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
const Heap_Block *free_block = _Heap_Free_list_first( heap );
while ( free_block != free_list_tail ) {
if ( free_block == block ) {
7564: e1530007 cmp r3, r7
7568: 0a000016 beq 75c8 <_Heap_Walk+0x4e0>
return true;
}
free_block = free_block->next;
756c: e5933008 ldr r3, [r3, #8]
)
{
const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
const Heap_Block *free_block = _Heap_Free_list_first( heap );
while ( free_block != free_list_tail ) {
7570: e1530006 cmp r3, r6
7574: 1afffffa bne 7564 <_Heap_Walk+0x47c>
7578: ea000019 b 75e4 <_Heap_Walk+0x4fc>
if ( !_Heap_Is_prev_used( next_block ) ) {
if ( !_Heap_Walk_check_free_block( source, printer, heap, block ) ) {
return false;
}
} else if (prev_used) {
757c: e35b0000 cmp fp, #0
7580: 0a000007 beq 75a4 <_Heap_Walk+0x4bc>
(*printer)(
7584: e58da000 str sl, [sp]
7588: e1a00005 mov r0, r5
758c: e3a01000 mov r1, #0
7590: e59f20d8 ldr r2, [pc, #216] ; 7670 <_Heap_Walk+0x588>
7594: e1a03007 mov r3, r7
7598: e1a0e00f mov lr, pc
759c: e12fff14 bx r4
75a0: ea000008 b 75c8 <_Heap_Walk+0x4e0>
"block 0x%08x: size %u\n",
block,
block_size
);
} else {
(*printer)(
75a4: e58da000 str sl, [sp]
75a8: e5973000 ldr r3, [r7]
75ac: e1a00005 mov r0, r5
75b0: e58d3004 str r3, [sp, #4]
75b4: e1a0100b mov r1, fp
75b8: e59f20b4 ldr r2, [pc, #180] ; 7674 <_Heap_Walk+0x58c>
75bc: e1a03007 mov r3, r7
75c0: e1a0e00f mov lr, pc
75c4: e12fff14 bx r4
block->prev_size
);
}
block = next_block;
} while ( block != first_block );
75c8: e59d2020 ldr r2, [sp, #32]
75cc: e1580002 cmp r8, r2
75d0: 1affff6a bne 7380 <_Heap_Walk+0x298>
Heap_Block *block = first_block;
Heap_Walk_printer printer = dump ?
_Heap_Walk_print : _Heap_Walk_print_nothing;
if ( !_System_state_Is_up( _System_state_Get() ) ) {
return true;
75d4: e3a08001 mov r8, #1
block = next_block;
} while ( block != first_block );
return true;
}
75d8: e1a00008 mov r0, r8
75dc: e28dd030 add sp, sp, #48 ; 0x30
75e0: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc}
return false;
}
if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
(*printer)(
75e4: e59f208c ldr r2, [pc, #140] ; 7678 <_Heap_Walk+0x590>
75e8: e1a00005 mov r0, r5
75ec: e3a01001 mov r1, #1
75f0: e1a03007 mov r3, r7
75f4: e1a0e00f mov lr, pc
75f8: e12fff14 bx r4
return false;
}
if ( !_Heap_Is_prev_used( next_block ) ) {
if ( !_Heap_Walk_check_free_block( source, printer, heap, block ) ) {
return false;
75fc: e3a08000 mov r8, #0
7600: eafffff4 b 75d8 <_Heap_Walk+0x4f0>
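_Heap_Walk() validates the block chain and free list and returns false on the first inconsistency it can report through the printer callback. A standalone sketch of the predicates the walker relies on, mirroring the inline helpers shown above (the struct here is a reduced, hypothetical stand-in for Heap_Block):

#include <stdbool.h>
#include <stdint.h>

#define HEAP_PREV_BLOCK_USED 0x1u

/* Reduced, hypothetical stand-in for the first two words of Heap_Block. */
typedef struct {
  uintptr_t prev_size;
  uintptr_t size_and_flag;
} heap_block_sketch;

/* Mirrors _Heap_Is_aligned(): true when value is a multiple of alignment. */
static bool heap_is_aligned(uintptr_t value, uintptr_t alignment)
{
  return (value % alignment) == 0;
}

/* Mirrors _Heap_Block_size(): the size with the prev-used flag masked off. */
static uintptr_t heap_block_size(const heap_block_sketch *block)
{
  return block->size_and_flag & ~(uintptr_t) HEAP_PREV_BLOCK_USED;
}

/* Mirrors _Heap_Is_prev_used(): the low bit of size_and_flag. */
static bool heap_is_prev_used(const heap_block_sketch *block)
{
  return (block->size_and_flag & HEAP_PREV_BLOCK_USED) != 0;
}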
00005d78 <aio_fsync>:
)
{
rtems_aio_request *req;
int mode;
if (op != O_SYNC)
5d78: e3500a02 cmp r0, #8192 ; 0x2000
int aio_fsync(
int op,
struct aiocb *aiocbp
)
{
5d7c: e92d4030 push {r4, r5, lr}
5d80: e1a04001 mov r4, r1
rtems_aio_request *req;
int mode;
if (op != O_SYNC)
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
5d84: 13a05016 movne r5, #22
)
{
rtems_aio_request *req;
int mode;
if (op != O_SYNC)
5d88: 1a00000c bne 5dc0 <aio_fsync+0x48>
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
5d8c: e5910000 ldr r0, [r1]
5d90: e3a01003 mov r1, #3
5d94: eb00199e bl c414 <fcntl>
if (!(((mode & O_ACCMODE) == O_WRONLY) || ((mode & O_ACCMODE) == O_RDWR)))
5d98: e2000003 and r0, r0, #3
5d9c: e2400001 sub r0, r0, #1
5da0: e3500001 cmp r0, #1
rtems_aio_set_errno_return_minus_one (EBADF, aiocbp);
5da4: 83a05009 movhi r5, #9
if (op != O_SYNC)
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
if (!(((mode & O_ACCMODE) == O_WRONLY) || ((mode & O_ACCMODE) == O_RDWR)))
5da8: 8a000004 bhi 5dc0 <aio_fsync+0x48>
rtems_aio_set_errno_return_minus_one (EBADF, aiocbp);
req = malloc (sizeof (rtems_aio_request));
5dac: e3a00018 mov r0, #24
5db0: ebfff537 bl 3294 <malloc>
if (req == NULL)
5db4: e2503000 subs r3, r0, #0
5db8: 1a000007 bne 5ddc <aio_fsync+0x64>
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
5dbc: e3a0500b mov r5, #11 <== NOT EXECUTED
5dc0: e3e03000 mvn r3, #0
5dc4: e5845030 str r5, [r4, #48] ; 0x30
5dc8: e5843034 str r3, [r4, #52] ; 0x34
5dcc: eb0026a8 bl f874 <__errno>
5dd0: e5805000 str r5, [r0]
req->aiocbp = aiocbp;
req->aiocbp->aio_lio_opcode = LIO_SYNC;
return rtems_aio_enqueue (req);
}
5dd4: e3e00000 mvn r0, #0
5dd8: e8bd8030 pop {r4, r5, pc}
req = malloc (sizeof (rtems_aio_request));
if (req == NULL)
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
req->aiocbp = aiocbp;
5ddc: e5834014 str r4, [r3, #20]
req->aiocbp->aio_lio_opcode = LIO_SYNC;
5de0: e3a03003 mov r3, #3
5de4: e584302c str r3, [r4, #44] ; 0x2c
return rtems_aio_enqueue (req);
}
5de8: e8bd4030 pop {r4, r5, lr}
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
req->aiocbp = aiocbp;
req->aiocbp->aio_lio_opcode = LIO_SYNC;
return rtems_aio_enqueue (req);
5dec: ea00013b b 62e0 <rtems_aio_enqueue>
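aio_fsync() accepts only O_SYNC as the operation, requires the descriptor to be open for writing, and otherwise queues an LIO_SYNC request. A minimal POSIX usage sketch (error handling is illustrative):

#include <aio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

/* Queue a synchronization request for an already written descriptor. */
int fsync_example(int fd)
{
  struct aiocb cb;

  memset(&cb, 0, sizeof(cb));
  cb.aio_fildes = fd;              /* must be open O_WRONLY or O_RDWR */

  if (aio_fsync(O_SYNC, &cb) != 0) {
    perror("aio_fsync");
    return -1;
  }

  /* Completion is later checked with aio_error(&cb) / aio_return(&cb). */
  return 0;
}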
000064e8 <aio_read>:
* 0 - otherwise
*/
int
aio_read (struct aiocb *aiocbp)
{
64e8: e92d4030 push {r4, r5, lr}
rtems_aio_request *req;
int mode;
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
64ec: e3a01003 mov r1, #3
* 0 - otherwise
*/
int
aio_read (struct aiocb *aiocbp)
{
64f0: e1a04000 mov r4, r0
rtems_aio_request *req;
int mode;
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
64f4: e5900000 ldr r0, [r0]
64f8: eb0017c5 bl c414 <fcntl>
if (!(((mode & O_ACCMODE) == O_RDONLY) || ((mode & O_ACCMODE) == O_RDWR)))
64fc: e2000003 and r0, r0, #3
6500: e3500002 cmp r0, #2
6504: 13500000 cmpne r0, #0
rtems_aio_set_errno_return_minus_one (EBADF, aiocbp);
6508: 13a05009 movne r5, #9
{
rtems_aio_request *req;
int mode;
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
if (!(((mode & O_ACCMODE) == O_RDONLY) || ((mode & O_ACCMODE) == O_RDWR)))
650c: 1a00000d bne 6548 <aio_read+0x60>
rtems_aio_set_errno_return_minus_one (EBADF, aiocbp);
if (aiocbp->aio_reqprio < 0 || aiocbp->aio_reqprio > AIO_PRIO_DELTA_MAX)
6510: e5943014 ldr r3, [r4, #20]
6514: e3530000 cmp r3, #0
6518: 1a000007 bne 653c <aio_read+0x54>
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
if (aiocbp->aio_offset < 0)
651c: e5943008 ldr r3, [r4, #8]
6520: e3530000 cmp r3, #0
6524: ba000004 blt 653c <aio_read+0x54>
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
req = malloc (sizeof (rtems_aio_request));
6528: e3a00018 mov r0, #24
652c: ebfff358 bl 3294 <malloc>
if (req == NULL)
6530: e2503000 subs r3, r0, #0
6534: 1a00000a bne 6564 <aio_read+0x7c>
6538: ea000001 b 6544 <aio_read+0x5c> <== NOT EXECUTED
if (aiocbp->aio_reqprio < 0 || aiocbp->aio_reqprio > AIO_PRIO_DELTA_MAX)
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
if (aiocbp->aio_offset < 0)
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
653c: e3a05016 mov r5, #22
6540: ea000000 b 6548 <aio_read+0x60>
req = malloc (sizeof (rtems_aio_request));
if (req == NULL)
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
6544: e3a0500b mov r5, #11 <== NOT EXECUTED
6548: e3e03000 mvn r3, #0
654c: e5845030 str r5, [r4, #48] ; 0x30
6550: e5843034 str r3, [r4, #52] ; 0x34
6554: eb0024c6 bl f874 <__errno>
6558: e5805000 str r5, [r0]
req->aiocbp = aiocbp;
req->aiocbp->aio_lio_opcode = LIO_READ;
return rtems_aio_enqueue (req);
}
655c: e3e00000 mvn r0, #0
6560: e8bd8030 pop {r4, r5, pc}
req = malloc (sizeof (rtems_aio_request));
if (req == NULL)
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
req->aiocbp = aiocbp;
6564: e5834014 str r4, [r3, #20]
req->aiocbp->aio_lio_opcode = LIO_READ;
6568: e3a03001 mov r3, #1
656c: e584302c str r3, [r4, #44] ; 0x2c
return rtems_aio_enqueue (req);
}
6570: e8bd4030 pop {r4, r5, lr}
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
req->aiocbp = aiocbp;
req->aiocbp->aio_lio_opcode = LIO_READ;
return rtems_aio_enqueue (req);
6574: eaffff59 b 62e0 <rtems_aio_enqueue>
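aio_read() validates the access mode, aio_reqprio and aio_offset, then enqueues an LIO_READ request; the actual pread() happens later in rtems_aio_handle(). A minimal sketch of issuing a read and polling for completion (descriptor and buffer are illustrative):

#include <aio.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>

/* Start an asynchronous read at offset 0 and poll until it finishes. */
ssize_t read_example(int fd, char *buf, size_t len)
{
  struct aiocb cb;

  memset(&cb, 0, sizeof(cb));
  cb.aio_fildes = fd;        /* must be open O_RDONLY or O_RDWR */
  cb.aio_buf    = buf;
  cb.aio_nbytes = len;
  cb.aio_offset = 0;

  if (aio_read(&cb) != 0)
    return -1;

  while (aio_error(&cb) == EINPROGRESS)
    ;                        /* a real caller would block or do other work */

  return aio_return(&cb);    /* bytes read, or -1 on failure */
}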
00006580 <aio_write>:
* 0 - otherwise
*/
int
aio_write (struct aiocb *aiocbp)
{
6580: e92d4030 push {r4, r5, lr}
rtems_aio_request *req;
int mode;
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
6584: e3a01003 mov r1, #3
* 0 - otherwise
*/
int
aio_write (struct aiocb *aiocbp)
{
6588: e1a04000 mov r4, r0
rtems_aio_request *req;
int mode;
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
658c: e5900000 ldr r0, [r0]
6590: eb00179f bl c414 <fcntl>
if (!(((mode & O_ACCMODE) == O_WRONLY) || ((mode & O_ACCMODE) == O_RDWR)))
6594: e2000003 and r0, r0, #3
6598: e2400001 sub r0, r0, #1
659c: e3500001 cmp r0, #1
rtems_aio_set_errno_return_minus_one (EBADF, aiocbp);
65a0: 83a05009 movhi r5, #9
{
rtems_aio_request *req;
int mode;
mode = fcntl (aiocbp->aio_fildes, F_GETFL);
if (!(((mode & O_ACCMODE) == O_WRONLY) || ((mode & O_ACCMODE) == O_RDWR)))
65a4: 8a00000d bhi 65e0 <aio_write+0x60>
rtems_aio_set_errno_return_minus_one (EBADF, aiocbp);
if (aiocbp->aio_reqprio < 0 || aiocbp->aio_reqprio > AIO_PRIO_DELTA_MAX)
65a8: e5943014 ldr r3, [r4, #20]
65ac: e3530000 cmp r3, #0
65b0: 1a000007 bne 65d4 <aio_write+0x54>
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
if (aiocbp->aio_offset < 0)
65b4: e5943008 ldr r3, [r4, #8]
65b8: e3530000 cmp r3, #0
65bc: ba000004 blt 65d4 <aio_write+0x54>
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
req = malloc (sizeof (rtems_aio_request));
65c0: e3a00018 mov r0, #24
65c4: ebfff332 bl 3294 <malloc>
if (req == NULL)
65c8: e2503000 subs r3, r0, #0
65cc: 1a00000a bne 65fc <aio_write+0x7c>
65d0: ea000001 b 65dc <aio_write+0x5c> <== NOT EXECUTED
if (aiocbp->aio_reqprio < 0 || aiocbp->aio_reqprio > AIO_PRIO_DELTA_MAX)
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
if (aiocbp->aio_offset < 0)
rtems_aio_set_errno_return_minus_one (EINVAL, aiocbp);
65d4: e3a05016 mov r5, #22
65d8: ea000000 b 65e0 <aio_write+0x60>
req = malloc (sizeof (rtems_aio_request));
if (req == NULL)
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
65dc: e3a0500b mov r5, #11 <== NOT EXECUTED
65e0: e3e03000 mvn r3, #0
65e4: e5845030 str r5, [r4, #48] ; 0x30
65e8: e5843034 str r3, [r4, #52] ; 0x34
65ec: eb0024a0 bl f874 <__errno>
65f0: e5805000 str r5, [r0]
req->aiocbp = aiocbp;
req->aiocbp->aio_lio_opcode = LIO_WRITE;
return rtems_aio_enqueue (req);
}
65f4: e3e00000 mvn r0, #0
65f8: e8bd8030 pop {r4, r5, pc}
req = malloc (sizeof (rtems_aio_request));
if (req == NULL)
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
req->aiocbp = aiocbp;
65fc: e5834014 str r4, [r3, #20]
req->aiocbp->aio_lio_opcode = LIO_WRITE;
6600: e3a03002 mov r3, #2
6604: e584302c str r3, [r4, #44] ; 0x2c
return rtems_aio_enqueue (req);
}
6608: e8bd4030 pop {r4, r5, lr}
rtems_aio_set_errno_return_minus_one (EAGAIN, aiocbp);
req->aiocbp = aiocbp;
req->aiocbp->aio_lio_opcode = LIO_WRITE;
return rtems_aio_enqueue (req);
660c: eaffff33 b 62e0 <rtems_aio_enqueue>
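aio_write() mirrors aio_read() but requires write access and enqueues LIO_WRITE. A correspondingly minimal sketch:

#include <aio.h>
#include <string.h>

/* Queue an asynchronous write of 'len' bytes at offset 0. */
int write_example(int fd, char *buf, size_t len)
{
  struct aiocb cb;

  memset(&cb, 0, sizeof(cb));
  cb.aio_fildes = fd;              /* must be open O_WRONLY or O_RDWR */
  cb.aio_buf    = buf;
  cb.aio_nbytes = len;
  cb.aio_offset = 0;

  return aio_write(&cb);           /* 0 when queued, -1 with errno set */
}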
0000a63c <pthread_attr_setschedpolicy>:
int pthread_attr_setschedpolicy(
pthread_attr_t *attr,
int policy
)
{
if ( !attr || !attr->is_initialized )
a63c: e3500000 cmp r0, #0
a640: 0a00000b beq a674 <pthread_attr_setschedpolicy+0x38>
a644: e5903000 ldr r3, [r0]
a648: e3530000 cmp r3, #0
a64c: 0a000008 beq a674 <pthread_attr_setschedpolicy+0x38>
return EINVAL;
switch ( policy ) {
a650: e3510004 cmp r1, #4
a654: 8a000008 bhi a67c <pthread_attr_setschedpolicy+0x40>
a658: e3a03001 mov r3, #1
a65c: e1a03113 lsl r3, r3, r1
a660: e3130017 tst r3, #23
case SCHED_OTHER:
case SCHED_FIFO:
case SCHED_RR:
case SCHED_SPORADIC:
attr->schedpolicy = policy;
a664: 15801014 strne r1, [r0, #20]
return 0;
a668: 13a00000 movne r0, #0
)
{
if ( !attr || !attr->is_initialized )
return EINVAL;
switch ( policy ) {
a66c: 112fff1e bxne lr
a670: ea000001 b a67c <pthread_attr_setschedpolicy+0x40> <== NOT EXECUTED
pthread_attr_t *attr,
int policy
)
{
if ( !attr || !attr->is_initialized )
return EINVAL;
a674: e3a00016 mov r0, #22
a678: e12fff1e bx lr
case SCHED_SPORADIC:
attr->schedpolicy = policy;
return 0;
default:
return ENOTSUP;
a67c: e3a00086 mov r0, #134 ; 0x86
}
}
a680: e12fff1e bx lr
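The switch above accepts SCHED_OTHER, SCHED_FIFO, SCHED_RR and SCHED_SPORADIC and rejects any other policy with ENOTSUP. A minimal sketch of selecting an explicit policy for a new thread (the start routine is assumed to be defined elsewhere):

#include <pthread.h>
#include <sched.h>

extern void *worker(void *arg);   /* assumed to exist elsewhere */

int spawn_fifo_thread(pthread_t *tid)
{
  pthread_attr_t attr;
  int            rc;

  pthread_attr_init(&attr);
  pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);

  rc = pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
  if (rc != 0) {                  /* EINVAL or ENOTSUP per the code above */
    pthread_attr_destroy(&attr);
    return rc;
  }

  rc = pthread_create(tid, &attr, worker, NULL);
  pthread_attr_destroy(&attr);
  return rc;
}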
00007480 <pthread_mutexattr_setpshared>:
int pthread_mutexattr_setpshared(
pthread_mutexattr_t *attr,
int pshared
)
{
if ( !attr || !attr->is_initialized )
7480: e3500000 cmp r0, #0
7484: 0a000007 beq 74a8 <pthread_mutexattr_setpshared+0x28>
7488: e5903000 ldr r3, [r0]
748c: e3530000 cmp r3, #0
7490: 0a000004 beq 74a8 <pthread_mutexattr_setpshared+0x28>
return EINVAL;
switch ( pshared ) {
7494: e3510001 cmp r1, #1
case PTHREAD_PROCESS_SHARED:
case PTHREAD_PROCESS_PRIVATE:
attr->process_shared = pshared;
7498: 95801004 strls r1, [r0, #4]
return 0;
749c: 93a00000 movls r0, #0
)
{
if ( !attr || !attr->is_initialized )
return EINVAL;
switch ( pshared ) {
74a0: 912fff1e bxls lr
74a4: ea000001 b 74b0 <pthread_mutexattr_setpshared+0x30> <== NOT EXECUTED
pthread_mutexattr_t *attr,
int pshared
)
{
if ( !attr || !attr->is_initialized )
return EINVAL;
74a8: e3a00016 mov r0, #22
74ac: e12fff1e bx lr
case PTHREAD_PROCESS_PRIVATE:
attr->process_shared = pshared;
return 0;
default:
return EINVAL;
74b0: e3a00016 mov r0, #22 <== NOT EXECUTED
}
}
74b4: e12fff1e bx lr <== NOT EXECUTED
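pthread_mutexattr_setpshared() accepts only PTHREAD_PROCESS_SHARED and PTHREAD_PROCESS_PRIVATE and returns EINVAL otherwise; pthread_rwlockattr_setpshared(), shown next, follows the same pattern. A minimal usage sketch:

#include <pthread.h>

/* Initialize a mutex whose attribute object requests process sharing. */
int init_shared_mutex(pthread_mutex_t *mutex)
{
  pthread_mutexattr_t attr;
  int                 rc;

  pthread_mutexattr_init(&attr);
  rc = pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
  if (rc == 0)
    rc = pthread_mutex_init(mutex, &attr);

  pthread_mutexattr_destroy(&attr);
  return rc;
}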
000070ec <pthread_rwlockattr_setpshared>:
int pthread_rwlockattr_setpshared(
pthread_rwlockattr_t *attr,
int pshared
)
{
if ( !attr )
70ec: e3500000 cmp r0, #0
70f0: 0a000007 beq 7114 <pthread_rwlockattr_setpshared+0x28>
return EINVAL;
if ( !attr->is_initialized )
70f4: e5903000 ldr r3, [r0]
70f8: e3530000 cmp r3, #0
70fc: 0a000004 beq 7114 <pthread_rwlockattr_setpshared+0x28>
return EINVAL;
switch ( pshared ) {
7100: e3510001 cmp r1, #1
case PTHREAD_PROCESS_SHARED:
case PTHREAD_PROCESS_PRIVATE:
attr->process_shared = pshared;
7104: 95801004 strls r1, [r0, #4]
return 0;
7108: 93a00000 movls r0, #0
return EINVAL;
if ( !attr->is_initialized )
return EINVAL;
switch ( pshared ) {
710c: 912fff1e bxls lr
7110: ea000001 b 711c <pthread_rwlockattr_setpshared+0x30>
{
if ( !attr )
return EINVAL;
if ( !attr->is_initialized )
return EINVAL;
7114: e3a00016 mov r0, #22
7118: e12fff1e bx lr
case PTHREAD_PROCESS_PRIVATE:
attr->process_shared = pshared;
return 0;
default:
return EINVAL;
711c: e3a00016 mov r0, #22 <== NOT EXECUTED
}
}
7120: e12fff1e bx lr <== NOT EXECUTED
000062e0 <rtems_aio_enqueue>:
* errno - otherwise
*/
int
rtems_aio_enqueue (rtems_aio_request *req)
{
62e0: e92d47f0 push {r4, r5, r6, r7, r8, r9, sl, lr}
struct sched_param param;
/* The queue should be initialized */
AIO_assert (aio_request_queue.initialized == AIO_QUEUE_INITIALIZED);
result = pthread_mutex_lock (&aio_request_queue.mutex);
62e4: e59f41ec ldr r4, [pc, #492] ; 64d8 <rtems_aio_enqueue+0x1f8>
* errno - otherwise
*/
int
rtems_aio_enqueue (rtems_aio_request *req)
{
62e8: e24dd024 sub sp, sp, #36 ; 0x24
62ec: e1a06000 mov r6, r0
struct sched_param param;
/* The queue should be initialized */
AIO_assert (aio_request_queue.initialized == AIO_QUEUE_INITIALIZED);
result = pthread_mutex_lock (&aio_request_queue.mutex);
62f0: e1a00004 mov r0, r4
62f4: eb00024d bl 6c30 <pthread_mutex_lock>
if (result != 0) {
62f8: e2505000 subs r5, r0, #0
62fc: 0a000002 beq 630c <rtems_aio_enqueue+0x2c>
free (req);
6300: e1a00006 mov r0, r6 <== NOT EXECUTED
6304: ebfff27c bl 2cfc <free> <== NOT EXECUTED
return result;
6308: ea00006f b 64cc <rtems_aio_enqueue+0x1ec> <== NOT EXECUTED
}
/* _POSIX_PRIORITIZED_IO and _POSIX_PRIORITY_SCHEDULING are defined,
we can use aio_reqprio to lower the priority of the request */
pthread_getschedparam (pthread_self(), &policy, &param);
630c: eb000454 bl 7464 <pthread_self>
6310: e28d101c add r1, sp, #28
6314: e1a0200d mov r2, sp
6318: eb000359 bl 7084 <pthread_getschedparam>
req->caller_thread = pthread_self ();
631c: eb000450 bl 7464 <pthread_self>
req->priority = param.sched_priority - req->aiocbp->aio_reqprio;
6320: e5963014 ldr r3, [r6, #20]
6324: e59d1000 ldr r1, [sp]
6328: e5932014 ldr r2, [r3, #20]
632c: e0622001 rsb r2, r2, r1
6330: e586200c str r2, [r6, #12]
req->policy = policy;
6334: e59d201c ldr r2, [sp, #28]
6338: e5862008 str r2, [r6, #8]
req->aiocbp->error_code = EINPROGRESS;
633c: e3a02077 mov r2, #119 ; 0x77
/* _POSIX_PRIORITIZED_IO and _POSIX_PRIORITY_SCHEDULING are defined,
we can use aio_reqprio to lower the priority of the request */
pthread_getschedparam (pthread_self(), &policy, &param);
req->caller_thread = pthread_self ();
6340: e5860010 str r0, [r6, #16]
req->priority = param.sched_priority - req->aiocbp->aio_reqprio;
req->policy = policy;
req->aiocbp->error_code = EINPROGRESS;
6344: e5832030 str r2, [r3, #48] ; 0x30
req->aiocbp->return_value = 0;
if ((aio_request_queue.idle_threads == 0) &&
6348: e5942068 ldr r2, [r4, #104] ; 0x68
634c: e3520000 cmp r2, #0
req->caller_thread = pthread_self ();
req->priority = param.sched_priority - req->aiocbp->aio_reqprio;
req->policy = policy;
req->aiocbp->error_code = EINPROGRESS;
req->aiocbp->return_value = 0;
6350: e5835034 str r5, [r3, #52] ; 0x34
if ((aio_request_queue.idle_threads == 0) &&
6354: 1a00002e bne 6414 <rtems_aio_enqueue+0x134>
6358: e5942064 ldr r2, [r4, #100] ; 0x64
635c: e3520004 cmp r2, #4
6360: ca00002b bgt 6414 <rtems_aio_enqueue+0x134>
aio_request_queue.active_threads < AIO_MAX_THREADS)
/* we still have empty places on the active_threads chain */
{
chain = &aio_request_queue.work_req;
r_chain = rtems_aio_search_fd (chain, req->aiocbp->aio_fildes, 1);
6364: e5931000 ldr r1, [r3]
6368: e2840048 add r0, r4, #72 ; 0x48
636c: e3a02001 mov r2, #1
6370: ebfffece bl 5eb0 <rtems_aio_search_fd>
if (r_chain->new_fd == 1) {
6374: e5903018 ldr r3, [r0, #24]
6378: e3530001 cmp r3, #1
if ((aio_request_queue.idle_threads == 0) &&
aio_request_queue.active_threads < AIO_MAX_THREADS)
/* we still have empty places on the active_threads chain */
{
chain = &aio_request_queue.work_req;
r_chain = rtems_aio_search_fd (chain, req->aiocbp->aio_fildes, 1);
637c: e1a07000 mov r7, r0
6380: e2809008 add r9, r0, #8
6384: e280801c add r8, r0, #28
6388: e280a020 add sl, r0, #32
if (r_chain->new_fd == 1) {
638c: 1a000017 bne 63f0 <rtems_aio_enqueue+0x110>
RTEMS_INLINE_ROUTINE void _Chain_Prepend(
Chain_Control *the_chain,
Chain_Node *the_node
)
{
_Chain_Insert(_Chain_Head(the_chain), the_node);
6390: e1a01006 mov r1, r6
6394: e1a00009 mov r0, r9
6398: eb000850 bl 84e0 <_Chain_Insert>
rtems_chain_prepend (&r_chain->perfd, &req->next_prio);
r_chain->new_fd = 0;
pthread_mutex_init (&r_chain->mutex, NULL);
639c: e1a01005 mov r1, r5
chain = &aio_request_queue.work_req;
r_chain = rtems_aio_search_fd (chain, req->aiocbp->aio_fildes, 1);
if (r_chain->new_fd == 1) {
rtems_chain_prepend (&r_chain->perfd, &req->next_prio);
r_chain->new_fd = 0;
63a0: e5875018 str r5, [r7, #24]
pthread_mutex_init (&r_chain->mutex, NULL);
63a4: e1a00008 mov r0, r8
63a8: eb0001ce bl 6ae8 <pthread_mutex_init>
pthread_cond_init (&r_chain->cond, NULL);
63ac: e1a01005 mov r1, r5
63b0: e1a0000a mov r0, sl
63b4: eb0000e3 bl 6748 <pthread_cond_init>
AIO_printf ("New thread \n");
result = pthread_create (&thid, &aio_request_queue.attr,
63b8: e1a03007 mov r3, r7
63bc: e28d0020 add r0, sp, #32
63c0: e2841008 add r1, r4, #8
63c4: e59f2110 ldr r2, [pc, #272] ; 64dc <rtems_aio_enqueue+0x1fc>
63c8: eb000299 bl 6e34 <pthread_create>
rtems_aio_handle, (void *) r_chain);
if (result != 0) {
63cc: e2506000 subs r6, r0, #0
pthread_mutex_unlock (&aio_request_queue.mutex);
return result;
}
++aio_request_queue.active_threads;
63d0: 05943064 ldreq r3, [r4, #100] ; 0x64
63d4: 02833001 addeq r3, r3, #1
63d8: 05843064 streq r3, [r4, #100] ; 0x64
pthread_cond_init (&r_chain->cond, NULL);
AIO_printf ("New thread \n");
result = pthread_create (&thid, &aio_request_queue.attr,
rtems_aio_handle, (void *) r_chain);
if (result != 0) {
63dc: 0a000038 beq 64c4 <rtems_aio_enqueue+0x1e4>
pthread_mutex_unlock (&aio_request_queue.mutex);
63e0: e1a00004 mov r0, r4 <== NOT EXECUTED
63e4: eb000230 bl 6cac <pthread_mutex_unlock> <== NOT EXECUTED
return result;
63e8: e1a05006 mov r5, r6 <== NOT EXECUTED
63ec: ea000036 b 64cc <rtems_aio_enqueue+0x1ec> <== NOT EXECUTED
}
++aio_request_queue.active_threads;
}
else {
/* put request in the fd chain it belongs to */
pthread_mutex_lock (&r_chain->mutex);
63f0: e1a00008 mov r0, r8
63f4: eb00020d bl 6c30 <pthread_mutex_lock>
rtems_aio_insert_prio (&r_chain->perfd, req);
63f8: e1a00009 mov r0, r9
63fc: e1a01006 mov r1, r6
6400: ebffff76 bl 61e0 <rtems_aio_insert_prio>
pthread_cond_signal (&r_chain->cond);
6404: e1a0000a mov r0, sl
6408: eb0000fe bl 6808 <pthread_cond_signal>
pthread_mutex_unlock (&r_chain->mutex);
640c: e1a00008 mov r0, r8
6410: ea00000e b 6450 <rtems_aio_enqueue+0x170>
else
{
/* the maximum number of threads has been already created
even though some of them might be idle.
The request belongs to one of the active fd chain */
r_chain = rtems_aio_search_fd (&aio_request_queue.work_req,
6414: e59f00c4 ldr r0, [pc, #196] ; 64e0 <rtems_aio_enqueue+0x200>
6418: e5931000 ldr r1, [r3]
641c: e3a02000 mov r2, #0
6420: ebfffea2 bl 5eb0 <rtems_aio_search_fd>
req->aiocbp->aio_fildes, 0);
if (r_chain != NULL)
6424: e2504000 subs r4, r0, #0
6428: 0a00000a beq 6458 <rtems_aio_enqueue+0x178>
{
pthread_mutex_lock (&r_chain->mutex);
642c: e284701c add r7, r4, #28
6430: e1a00007 mov r0, r7
6434: eb0001fd bl 6c30 <pthread_mutex_lock>
rtems_aio_insert_prio (&r_chain->perfd, req);
6438: e2840008 add r0, r4, #8
643c: e1a01006 mov r1, r6
6440: ebffff66 bl 61e0 <rtems_aio_insert_prio>
pthread_cond_signal (&r_chain->cond);
6444: e2840020 add r0, r4, #32
6448: eb0000ee bl 6808 <pthread_cond_signal>
pthread_mutex_unlock (&r_chain->mutex);
644c: e1a00007 mov r0, r7
6450: eb000215 bl 6cac <pthread_mutex_unlock>
6454: ea00001a b 64c4 <rtems_aio_enqueue+0x1e4>
} else {
/* or to the idle chain */
chain = &aio_request_queue.idle_req;
r_chain = rtems_aio_search_fd (chain, req->aiocbp->aio_fildes, 1);
6458: e5963014 ldr r3, [r6, #20]
645c: e59f0080 ldr r0, [pc, #128] ; 64e4 <rtems_aio_enqueue+0x204>
6460: e5931000 ldr r1, [r3]
6464: e3a02001 mov r2, #1
6468: ebfffe90 bl 5eb0 <rtems_aio_search_fd>
if (r_chain->new_fd == 1) {
646c: e5903018 ldr r3, [r0, #24]
6470: e3530001 cmp r3, #1
} else {
/* or to the idle chain */
chain = &aio_request_queue.idle_req;
r_chain = rtems_aio_search_fd (chain, req->aiocbp->aio_fildes, 1);
6474: e1a07000 mov r7, r0
6478: e2800008 add r0, r0, #8
if (r_chain->new_fd == 1) {
647c: 1a000009 bne 64a8 <rtems_aio_enqueue+0x1c8>
6480: e1a01006 mov r1, r6
6484: eb000815 bl 84e0 <_Chain_Insert>
/* If this is a new fd chain we signal the idle threads that
might be waiting for requests */
AIO_printf (" New chain on waiting queue \n ");
rtems_chain_prepend (&r_chain->perfd, &req->next_prio);
r_chain->new_fd = 0;
pthread_mutex_init (&r_chain->mutex, NULL);
6488: e1a01004 mov r1, r4
if (r_chain->new_fd == 1) {
/* If this is a new fd chain we signal the idle threads that
might be waiting for requests */
AIO_printf (" New chain on waiting queue \n ");
rtems_chain_prepend (&r_chain->perfd, &req->next_prio);
r_chain->new_fd = 0;
648c: e5874018 str r4, [r7, #24]
pthread_mutex_init (&r_chain->mutex, NULL);
6490: e287001c add r0, r7, #28
6494: eb000193 bl 6ae8 <pthread_mutex_init>
pthread_cond_init (&r_chain->cond, NULL);
6498: e2870020 add r0, r7, #32
649c: e1a01004 mov r1, r4
64a0: eb0000a8 bl 6748 <pthread_cond_init>
64a4: ea000001 b 64b0 <rtems_aio_enqueue+0x1d0>
} else
/* just insert the request in the existing fd chain */
rtems_aio_insert_prio (&r_chain->perfd, req);
64a8: e1a01006 mov r1, r6
64ac: ebffff4b bl 61e0 <rtems_aio_insert_prio>
if (aio_request_queue.idle_threads > 0)
64b0: e59f0020 ldr r0, [pc, #32] ; 64d8 <rtems_aio_enqueue+0x1f8>
64b4: e5903068 ldr r3, [r0, #104] ; 0x68
64b8: e3530000 cmp r3, #0
pthread_cond_signal (&aio_request_queue.new_req);
64bc: c2800004 addgt r0, r0, #4
64c0: cb0000d0 blgt 6808 <pthread_cond_signal>
}
}
pthread_mutex_unlock (&aio_request_queue.mutex);
64c4: e59f000c ldr r0, [pc, #12] ; 64d8 <rtems_aio_enqueue+0x1f8>
64c8: eb0001f7 bl 6cac <pthread_mutex_unlock>
return 0;
}
64cc: e1a00005 mov r0, r5
64d0: e28dd024 add sp, sp, #36 ; 0x24
64d4: e8bd87f0 pop {r4, r5, r6, r7, r8, r9, sl, pc}
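rtems_aio_enqueue() lowers the request priority by aio_reqprio, then either hands the request to an existing per-descriptor chain or, while no workers are idle and fewer than the maximum are active, creates a new rtems_aio_handle() thread for it. A condensed sketch of that dispatch decision (the struct and constant are hypothetical stand-ins for the internal aio_request_queue fields):

#include <stdbool.h>

/* Hypothetical stand-in for the counters consulted above. */
struct aio_queue_sketch {
  int idle_threads;      /* workers blocked on the idle chain */
  int active_threads;    /* workers bound to a per-fd work chain */
};

enum { AIO_MAX_THREADS_SKETCH = 5 };   /* matches the compare-with-#4 test above */

/* True when the enqueue path may create a brand new worker thread. */
static bool may_create_worker(const struct aio_queue_sketch *q)
{
  return q->idle_threads == 0 &&
         q->active_threads < AIO_MAX_THREADS_SKETCH;
}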
00005f94 <rtems_aio_handle>:
* NULL - if error
*/
static void *
rtems_aio_handle (void *arg)
{
5f94: e92d4ff0 push {r4, r5, r6, r7, r8, r9, sl, fp, lr}
struct timespec timeout;
AIO_printf ("Chain is empty [WQ], wait for work\n");
pthread_mutex_unlock (&r_chain->mutex);
pthread_mutex_lock (&aio_request_queue.mutex);
5f98: e59f4238 ldr r4, [pc, #568] ; 61d8 <rtems_aio_handle+0x244>
* NULL - if error
*/
static void *
rtems_aio_handle (void *arg)
{
5f9c: e24dd02c sub sp, sp, #44 ; 0x2c
5fa0: e1a05000 mov r5, r0
pthread_mutex_unlock (&r_chain->mutex);
pthread_mutex_lock (&aio_request_queue.mutex);
if (rtems_chain_is_empty (chain))
{
clock_gettime (CLOCK_REALTIME, &timeout);
5fa4: e28d7020 add r7, sp, #32
node = rtems_chain_first (chain);
req = (rtems_aio_request *) node;
/* See _POSIX_PRIORITIZE_IO and _POSIX_PRIORITY_SCHEDULING
discussion in rtems_aio_enqueue () */
pthread_getschedparam (pthread_self(), &policy, &param);
5fa8: e28db004 add fp, sp, #4
/* acquire the mutex of the current fd chain.
we don't need to lock the queue mutex since we can
add requests to idle fd chains or even active ones
if the working request has been extracted from the
chain */
result = pthread_mutex_lock (&r_chain->mutex);
5fac: e285a01c add sl, r5, #28
5fb0: e1a0000a mov r0, sl
5fb4: eb00031d bl 6c30 <pthread_mutex_lock>
if (result != 0)
5fb8: e2509000 subs r9, r0, #0
5fbc: 1a000082 bne 61cc <rtems_aio_handle+0x238>
}
}
AIO_printf ("Thread finished\n");
return NULL;
}
5fc0: e5956008 ldr r6, [r5, #8]
RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(
const Chain_Control *the_chain
)
{
return _Chain_Immutable_first( the_chain )
== _Chain_Immutable_tail( the_chain );
5fc4: e285300c add r3, r5, #12
/* If the locked chain is not empty, take the first
request extract it, unlock the chain and process
the request, in this way the user can supply more
requests to this fd chain */
if (!rtems_chain_is_empty (chain)) {
5fc8: e1560003 cmp r6, r3
5fcc: 0a000035 beq 60a8 <rtems_aio_handle+0x114>
node = rtems_chain_first (chain);
req = (rtems_aio_request *) node;
/* See _POSIX_PRIORITIZE_IO and _POSIX_PRIORITY_SCHEDULING
discussion in rtems_aio_enqueue () */
pthread_getschedparam (pthread_self(), &policy, &param);
5fd0: eb000523 bl 7464 <pthread_self>
5fd4: e28d1028 add r1, sp, #40 ; 0x28
5fd8: e1a0200b mov r2, fp
5fdc: eb000428 bl 7084 <pthread_getschedparam>
param.sched_priority = req->priority;
5fe0: e596300c ldr r3, [r6, #12]
5fe4: e58d3004 str r3, [sp, #4]
pthread_setschedparam (pthread_self(), req->policy, &param);
5fe8: eb00051d bl 7464 <pthread_self>
5fec: e1a0200b mov r2, fp
5ff0: e5961008 ldr r1, [r6, #8]
5ff4: eb00051f bl 7478 <pthread_setschedparam>
*/
RTEMS_INLINE_ROUTINE void rtems_chain_extract(
rtems_chain_node *the_node
)
{
_Chain_Extract( the_node );
5ff8: e1a00006 mov r0, r6
5ffc: eb000922 bl 848c <_Chain_Extract>
rtems_chain_extract (node);
pthread_mutex_unlock (&r_chain->mutex);
6000: e1a0000a mov r0, sl
6004: eb000328 bl 6cac <pthread_mutex_unlock>
switch (req->aiocbp->aio_lio_opcode) {
6008: e5962014 ldr r2, [r6, #20]
600c: e592302c ldr r3, [r2, #44] ; 0x2c
6010: e3530002 cmp r3, #2
6014: 0a00000b beq 6048 <rtems_aio_handle+0xb4>
6018: e3530003 cmp r3, #3
601c: 0a000011 beq 6068 <rtems_aio_handle+0xd4>
6020: e3530001 cmp r3, #1
6024: 1a000013 bne 6078 <rtems_aio_handle+0xe4>
case LIO_READ:
AIO_printf ("read\n");
result = pread (req->aiocbp->aio_fildes,
6028: e5921008 ldr r1, [r2, #8]
602c: e5923004 ldr r3, [r2, #4]
6030: e58d1000 str r1, [sp]
6034: e592100c ldr r1, [r2, #12]
6038: e5920000 ldr r0, [r2]
603c: e5922010 ldr r2, [r2, #16]
6040: eb00290f bl 10484 <pread>
(void *) req->aiocbp->aio_buf,
req->aiocbp->aio_nbytes, req->aiocbp->aio_offset);
break;
6044: ea000009 b 6070 <rtems_aio_handle+0xdc>
case LIO_WRITE:
AIO_printf ("write\n");
result = pwrite (req->aiocbp->aio_fildes,
6048: e5921008 ldr r1, [r2, #8]
604c: e5923004 ldr r3, [r2, #4]
6050: e58d1000 str r1, [sp]
6054: e592100c ldr r1, [r2, #12]
6058: e5920000 ldr r0, [r2]
605c: e5922010 ldr r2, [r2, #16]
6060: eb002951 bl 105ac <pwrite>
(void *) req->aiocbp->aio_buf,
req->aiocbp->aio_nbytes, req->aiocbp->aio_offset);
break;
6064: ea000001 b 6070 <rtems_aio_handle+0xdc>
case LIO_SYNC:
AIO_printf ("sync\n");
result = fsync (req->aiocbp->aio_fildes);
6068: e5920000 ldr r0, [r2] <== NOT EXECUTED
606c: eb00195d bl c5e8 <fsync> <== NOT EXECUTED
break;
default:
result = -1;
}
if (result == -1) {
6070: e3700001 cmn r0, #1
6074: 1a000006 bne 6094 <rtems_aio_handle+0x100>
req->aiocbp->return_value = -1;
6078: e5966014 ldr r6, [r6, #20] <== NOT EXECUTED
607c: e3e02000 mvn r2, #0 <== NOT EXECUTED
6080: e5862034 str r2, [r6, #52] ; 0x34 <== NOT EXECUTED
req->aiocbp->error_code = errno;
6084: eb0025fa bl f874 <__errno> <== NOT EXECUTED
6088: e5903000 ldr r3, [r0] <== NOT EXECUTED
608c: e5863030 str r3, [r6, #48] ; 0x30 <== NOT EXECUTED
6090: eaffffc5 b 5fac <rtems_aio_handle+0x18> <== NOT EXECUTED
} else {
req->aiocbp->return_value = result;
6094: e5963014 ldr r3, [r6, #20]
req->aiocbp->error_code = 0;
6098: e3a02000 mov r2, #0
}
if (result == -1) {
req->aiocbp->return_value = -1;
req->aiocbp->error_code = errno;
} else {
req->aiocbp->return_value = result;
609c: e5830034 str r0, [r3, #52] ; 0x34
req->aiocbp->error_code = 0;
60a0: e5832030 str r2, [r3, #48] ; 0x30
60a4: eaffffc0 b 5fac <rtems_aio_handle+0x18>
struct timespec timeout;
AIO_printf ("Chain is empty [WQ], wait for work\n");
pthread_mutex_unlock (&r_chain->mutex);
pthread_mutex_lock (&aio_request_queue.mutex);
60a8: e59f8128 ldr r8, [pc, #296] ; 61d8 <rtems_aio_handle+0x244>
struct timespec timeout;
AIO_printf ("Chain is empty [WQ], wait for work\n");
pthread_mutex_unlock (&r_chain->mutex);
60ac: e1a0000a mov r0, sl
60b0: eb0002fd bl 6cac <pthread_mutex_unlock>
pthread_mutex_lock (&aio_request_queue.mutex);
60b4: e1a00008 mov r0, r8
60b8: eb0002dc bl 6c30 <pthread_mutex_lock>
if (rtems_chain_is_empty (chain))
60bc: e5953008 ldr r3, [r5, #8]
60c0: e1530006 cmp r3, r6
60c4: 1a00003d bne 61c0 <rtems_aio_handle+0x22c>
{
clock_gettime (CLOCK_REALTIME, &timeout);
60c8: e1a01007 mov r1, r7
60cc: e3a00001 mov r0, #1
60d0: eb00014e bl 6610 <clock_gettime>
timeout.tv_sec += 3;
60d4: e59d3020 ldr r3, [sp, #32]
timeout.tv_nsec = 0;
result = pthread_cond_timedwait (&r_chain->cond,
60d8: e2856020 add r6, r5, #32
pthread_mutex_lock (&aio_request_queue.mutex);
if (rtems_chain_is_empty (chain))
{
clock_gettime (CLOCK_REALTIME, &timeout);
timeout.tv_sec += 3;
60dc: e2833003 add r3, r3, #3
timeout.tv_nsec = 0;
result = pthread_cond_timedwait (&r_chain->cond,
60e0: e1a00006 mov r0, r6
60e4: e1a01008 mov r1, r8
60e8: e1a02007 mov r2, r7
pthread_mutex_lock (&aio_request_queue.mutex);
if (rtems_chain_is_empty (chain))
{
clock_gettime (CLOCK_REALTIME, &timeout);
timeout.tv_sec += 3;
60ec: e58d3020 str r3, [sp, #32]
timeout.tv_nsec = 0;
60f0: e58d9024 str r9, [sp, #36] ; 0x24
result = pthread_cond_timedwait (&r_chain->cond,
60f4: eb0001da bl 6864 <pthread_cond_timedwait>
&aio_request_queue.mutex,
&timeout);
/* If no requests were added to the chain we delete the fd chain from
the queue and start working with idle fd chains */
if (result == ETIMEDOUT) {
60f8: e3500074 cmp r0, #116 ; 0x74
60fc: 1a00002f bne 61c0 <rtems_aio_handle+0x22c>
6100: e1a00005 mov r0, r5
6104: eb0008e0 bl 848c <_Chain_Extract>
rtems_chain_extract (&r_chain->next_fd);
pthread_mutex_destroy (&r_chain->mutex);
6108: e1a0000a mov r0, sl
610c: eb000229 bl 69b8 <pthread_mutex_destroy>
pthread_cond_destroy (&r_chain->cond);
6110: e1a00006 mov r0, r6
6114: eb00015b bl 6688 <pthread_cond_destroy>
free (r_chain);
6118: e1a00005 mov r0, r5
611c: ebfff2f6 bl 2cfc <free>
/* If the idle chain is empty sleep for 3 seconds and wait for a
signal. The thread now becomes idle. */
if (rtems_chain_is_empty (&aio_request_queue.idle_req)) {
6120: e5943054 ldr r3, [r4, #84] ; 0x54
6124: e59f20b0 ldr r2, [pc, #176] ; 61dc <rtems_aio_handle+0x248>
6128: e1530002 cmp r3, r2
612c: 1a000018 bne 6194 <rtems_aio_handle+0x200>
AIO_printf ("Chain is empty [IQ], wait for work\n");
++aio_request_queue.idle_threads;
6130: e5943068 ldr r3, [r4, #104] ; 0x68
6134: e2833001 add r3, r3, #1
6138: e5843068 str r3, [r4, #104] ; 0x68
--aio_request_queue.active_threads;
613c: e5943064 ldr r3, [r4, #100] ; 0x64
clock_gettime (CLOCK_REALTIME, &timeout);
6140: e1a01007 mov r1, r7
signal. The thread now becomes idle. */
if (rtems_chain_is_empty (&aio_request_queue.idle_req)) {
AIO_printf ("Chain is empty [IQ], wait for work\n");
++aio_request_queue.idle_threads;
--aio_request_queue.active_threads;
6144: e2433001 sub r3, r3, #1
clock_gettime (CLOCK_REALTIME, &timeout);
6148: e3a00001 mov r0, #1
signal. The thread now becomes idle. */
if (rtems_chain_is_empty (&aio_request_queue.idle_req)) {
AIO_printf ("Chain is empty [IQ], wait for work\n");
++aio_request_queue.idle_threads;
--aio_request_queue.active_threads;
614c: e5843064 str r3, [r4, #100] ; 0x64
clock_gettime (CLOCK_REALTIME, &timeout);
6150: eb00012e bl 6610 <clock_gettime>
timeout.tv_sec += 3;
6154: e59d3020 ldr r3, [sp, #32]
timeout.tv_nsec = 0;
result = pthread_cond_timedwait (&aio_request_queue.new_req,
6158: e2880004 add r0, r8, #4
AIO_printf ("Chain is empty [IQ], wait for work\n");
++aio_request_queue.idle_threads;
--aio_request_queue.active_threads;
clock_gettime (CLOCK_REALTIME, &timeout);
timeout.tv_sec += 3;
615c: e2833003 add r3, r3, #3
timeout.tv_nsec = 0;
result = pthread_cond_timedwait (&aio_request_queue.new_req,
6160: e1a01008 mov r1, r8
6164: e1a02007 mov r2, r7
AIO_printf ("Chain is empty [IQ], wait for work\n");
++aio_request_queue.idle_threads;
--aio_request_queue.active_threads;
clock_gettime (CLOCK_REALTIME, &timeout);
timeout.tv_sec += 3;
6168: e58d3020 str r3, [sp, #32]
timeout.tv_nsec = 0;
616c: e58d9024 str r9, [sp, #36] ; 0x24
result = pthread_cond_timedwait (&aio_request_queue.new_req,
6170: eb0001bb bl 6864 <pthread_cond_timedwait>
&aio_request_queue.mutex,
&timeout);
/* If no new fd chain was added in the idle requests
then this thread is finished */
if (result == ETIMEDOUT) {
6174: e3500074 cmp r0, #116 ; 0x74
6178: 1a000005 bne 6194 <rtems_aio_handle+0x200>
AIO_printf ("Etimeout\n");
--aio_request_queue.idle_threads;
617c: e5983068 ldr r3, [r8, #104] ; 0x68
pthread_mutex_unlock (&aio_request_queue.mutex);
6180: e1a00008 mov r0, r8
/* If no new fd chain was added in the idle requests
then this thread is finished */
if (result == ETIMEDOUT) {
AIO_printf ("Etimeout\n");
--aio_request_queue.idle_threads;
6184: e2433001 sub r3, r3, #1
6188: e5883068 str r3, [r8, #104] ; 0x68
pthread_mutex_unlock (&aio_request_queue.mutex);
618c: eb0002c6 bl 6cac <pthread_mutex_unlock>
return NULL;
6190: ea00000d b 61cc <rtems_aio_handle+0x238>
}
}
/* Otherwise move this chain to the working chain and
start the loop all over again */
AIO_printf ("Work on idle\n");
--aio_request_queue.idle_threads;
6194: e5943068 ldr r3, [r4, #104] ; 0x68
6198: e2433001 sub r3, r3, #1
}
}
AIO_printf ("Thread finished\n");
return NULL;
}
619c: e5945054 ldr r5, [r4, #84] ; 0x54
}
}
/* Otherwise move this chain to the working chain and
start the loop all over again */
AIO_printf ("Work on idle\n");
--aio_request_queue.idle_threads;
61a0: e5843068 str r3, [r4, #104] ; 0x68
++aio_request_queue.active_threads;
61a4: e5943064 ldr r3, [r4, #100] ; 0x64
61a8: e1a00005 mov r0, r5
61ac: e2833001 add r3, r3, #1
61b0: e5843064 str r3, [r4, #100] ; 0x64
61b4: eb0008b4 bl 848c <_Chain_Extract>
node = rtems_chain_first (&aio_request_queue.idle_req);
rtems_chain_extract (node);
r_chain = (rtems_aio_request_chain *) node;
rtems_aio_move_to_work (r_chain);
61b8: e1a00005 mov r0, r5
61bc: ebffff65 bl 5f58 <rtems_aio_move_to_work>
}
}
/* If there was a request added in the initial fd chain then release
the mutex and process it */
pthread_mutex_unlock (&aio_request_queue.mutex);
61c0: e59f0010 ldr r0, [pc, #16] ; 61d8 <rtems_aio_handle+0x244>
61c4: eb0002b8 bl 6cac <pthread_mutex_unlock>
61c8: eaffff77 b 5fac <rtems_aio_handle+0x18>
}
}
AIO_printf ("Thread finished\n");
return NULL;
}
61cc: e3a00000 mov r0, #0
61d0: e28dd02c add sp, sp, #44 ; 0x2c
61d4: e8bd8ff0 pop {r4, r5, r6, r7, r8, r9, sl, fp, pc}
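The executed path above follows one idiom throughout rtems_aio_handle: take the queue mutex, and whenever the watched chain is empty, block on a condition variable with an absolute deadline three seconds in the future; an ETIMEDOUT result means no work arrived, so the thread retreats first to the idle queue and finally out of the pool. A minimal sketch of that timed-wait idiom, using only POSIX calls; the names queue_mutex, queue_cond and queue_is_empty are illustrative, not identifiers from the listing:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  queue_cond  = PTHREAD_COND_INITIALIZER;
static bool queue_is_empty = true;            /* protected by queue_mutex */

/* Returns true if work arrived, false if a three-second wait expired. */
static bool wait_for_work(void)
{
  struct timespec timeout;
  int result = 0;

  pthread_mutex_lock(&queue_mutex);
  while (queue_is_empty && result != ETIMEDOUT) {
    clock_gettime(CLOCK_REALTIME, &timeout);  /* absolute deadline ...      */
    timeout.tv_sec += 3;                      /* ... three seconds from now */
    timeout.tv_nsec = 0;
    result = pthread_cond_timedwait(&queue_cond, &queue_mutex, &timeout);
  }
  pthread_mutex_unlock(&queue_mutex);
  return !queue_is_empty;
}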
00005df0 <rtems_aio_init>:
* 0 - if initialization succeeded
*/
int
rtems_aio_init (void)
{
5df0: e92d4010 push {r4, lr}
int result = 0;
result = pthread_attr_init (&aio_request_queue.attr);
5df4: e59f00a4 ldr r0, [pc, #164] ; 5ea0 <rtems_aio_init+0xb0>
5df8: eb0003ed bl 6db4 <pthread_attr_init>
if (result != 0)
5dfc: e2504000 subs r4, r0, #0
5e00: 1a000024 bne 5e98 <rtems_aio_init+0xa8>
return result;
result =
5e04: e59f0094 ldr r0, [pc, #148] ; 5ea0 <rtems_aio_init+0xb0>
5e08: e1a01004 mov r1, r4
5e0c: eb0003fa bl 6dfc <pthread_attr_setdetachstate>
pthread_attr_setdetachstate (&aio_request_queue.attr,
PTHREAD_CREATE_DETACHED);
if (result != 0)
5e10: e3500000 cmp r0, #0
pthread_attr_destroy (&aio_request_queue.attr);
5e14: 159f0084 ldrne r0, [pc, #132] ; 5ea0 <rtems_aio_init+0xb0>
5e18: 1b0003dc blne 6d90 <pthread_attr_destroy>
result = pthread_mutex_init (&aio_request_queue.mutex, NULL);
5e1c: e59f0080 ldr r0, [pc, #128] ; 5ea4 <rtems_aio_init+0xb4>
5e20: e3a01000 mov r1, #0
5e24: eb00032f bl 6ae8 <pthread_mutex_init>
if (result != 0)
5e28: e3500000 cmp r0, #0
pthread_attr_destroy (&aio_request_queue.attr);
5e2c: 159f006c ldrne r0, [pc, #108] ; 5ea0 <rtems_aio_init+0xb0>
5e30: 1b0003d6 blne 6d90 <pthread_attr_destroy>
result = pthread_cond_init (&aio_request_queue.new_req, NULL);
5e34: e59f006c ldr r0, [pc, #108] ; 5ea8 <rtems_aio_init+0xb8>
5e38: e3a01000 mov r1, #0
5e3c: eb000241 bl 6748 <pthread_cond_init>
if (result != 0) {
5e40: e2504000 subs r4, r0, #0
5e44: 0a000003 beq 5e58 <rtems_aio_init+0x68>
pthread_mutex_destroy (&aio_request_queue.mutex);
5e48: e59f0054 ldr r0, [pc, #84] ; 5ea4 <rtems_aio_init+0xb4> <== NOT EXECUTED
5e4c: eb0002d9 bl 69b8 <pthread_mutex_destroy> <== NOT EXECUTED
pthread_attr_destroy (&aio_request_queue.attr);
5e50: e59f0048 ldr r0, [pc, #72] ; 5ea0 <rtems_aio_init+0xb0> <== NOT EXECUTED
5e54: eb0003cd bl 6d90 <pthread_attr_destroy> <== NOT EXECUTED
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
5e58: e59f3044 ldr r3, [pc, #68] ; 5ea4 <rtems_aio_init+0xb4>
5e5c: e283204c add r2, r3, #76 ; 0x4c
head->previous = NULL;
tail->previous = head;
5e60: e2831048 add r1, r3, #72 ; 0x48
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
5e64: e5832048 str r2, [r3, #72] ; 0x48
head->previous = NULL;
5e68: e3a02000 mov r2, #0
5e6c: e583204c str r2, [r3, #76] ; 0x4c
tail->previous = head;
5e70: e5831050 str r1, [r3, #80] ; 0x50
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
head->previous = NULL;
5e74: e5832058 str r2, [r3, #88] ; 0x58
)
{
Chain_Node *head = _Chain_Head( the_chain );
Chain_Node *tail = _Chain_Tail( the_chain );
head->next = tail;
5e78: e2831058 add r1, r3, #88 ; 0x58
}
rtems_chain_initialize_empty (&aio_request_queue.work_req);
rtems_chain_initialize_empty (&aio_request_queue.idle_req);
aio_request_queue.active_threads = 0;
5e7c: e5832064 str r2, [r3, #100] ; 0x64
aio_request_queue.idle_threads = 0;
5e80: e5832068 str r2, [r3, #104] ; 0x68
aio_request_queue.initialized = AIO_QUEUE_INITIALIZED;
5e84: e59f2020 ldr r2, [pc, #32] ; 5eac <rtems_aio_init+0xbc>
5e88: e5831054 str r1, [r3, #84] ; 0x54
head->previous = NULL;
tail->previous = head;
5e8c: e2831054 add r1, r3, #84 ; 0x54
5e90: e583105c str r1, [r3, #92] ; 0x5c
5e94: e5832060 str r2, [r3, #96] ; 0x60
return result;
}
5e98: e1a00004 mov r0, r4
5e9c: e8bd8010 pop {r4, pc}
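rtems_aio_init above builds a detached-thread attribute, the queue mutex and the new_req condition variable in sequence, with pthread_*_destroy calls on the error paths, and finishes by emptying the work and idle chains and zeroing the thread counters. A strict version of that initialize-with-rollback pattern, sketched with the same POSIX calls; the struct layout and field names are illustrative, not the aio_request_queue layout:

#include <pthread.h>

struct request_queue {
  pthread_attr_t  attr;
  pthread_mutex_t mutex;
  pthread_cond_t  new_req;
  int             active_threads;
  int             idle_threads;
};

static int request_queue_init(struct request_queue *q)
{
  int result = pthread_attr_init(&q->attr);
  if (result != 0)
    return result;

  result = pthread_attr_setdetachstate(&q->attr, PTHREAD_CREATE_DETACHED);
  if (result != 0)
    goto destroy_attr;

  result = pthread_mutex_init(&q->mutex, NULL);
  if (result != 0)
    goto destroy_attr;

  result = pthread_cond_init(&q->new_req, NULL);
  if (result != 0)
    goto destroy_mutex;

  q->active_threads = 0;                 /* counters start at zero */
  q->idle_threads   = 0;
  return 0;

destroy_mutex:
  pthread_mutex_destroy(&q->mutex);
destroy_attr:
  pthread_attr_destroy(&q->attr);
  return result;
}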
000061e0 <rtems_aio_insert_prio>:
61e0: e1a03000 mov r3, r0
61e4: e4932004 ldr r2, [r3], #4
rtems_chain_node *node;
AIO_printf ("FD exists \n");
node = rtems_chain_first (chain);
if (rtems_chain_is_empty (chain)) {
61e8: e1520003 cmp r2, r3
* NONE
*/
void
rtems_aio_insert_prio (rtems_chain_control *chain, rtems_aio_request *req)
{
61ec: e1a0c001 mov ip, r1
rtems_chain_node *node;
AIO_printf ("FD exists \n");
node = rtems_chain_first (chain);
if (rtems_chain_is_empty (chain)) {
61f0: 0a00000d beq 622c <rtems_aio_insert_prio+0x4c>
AIO_printf ("First in chain \n");
rtems_chain_prepend (chain, &req->next_prio);
} else {
AIO_printf ("Add by priority \n");
int prio = ((rtems_aio_request *) node)->aiocbp->aio_reqprio;
61f4: e5921014 ldr r1, [r2, #20]
while (req->aiocbp->aio_reqprio > prio &&
61f8: e59c0014 ldr r0, [ip, #20]
if (rtems_chain_is_empty (chain)) {
AIO_printf ("First in chain \n");
rtems_chain_prepend (chain, &req->next_prio);
} else {
AIO_printf ("Add by priority \n");
int prio = ((rtems_aio_request *) node)->aiocbp->aio_reqprio;
61fc: e5911014 ldr r1, [r1, #20]
while (req->aiocbp->aio_reqprio > prio &&
6200: e5900014 ldr r0, [r0, #20]
6204: ea000002 b 6214 <rtems_aio_insert_prio+0x34>
}
}
AIO_printf ("Thread finished\n");
return NULL;
}
6208: e5922000 ldr r2, [r2] <== NOT EXECUTED
int prio = ((rtems_aio_request *) node)->aiocbp->aio_reqprio;
while (req->aiocbp->aio_reqprio > prio &&
!rtems_chain_is_tail (chain, node)) {
node = rtems_chain_next (node);
prio = ((rtems_aio_request *) node)->aiocbp->aio_reqprio;
620c: e5921014 ldr r1, [r2, #20] <== NOT EXECUTED
6210: e5911014 ldr r1, [r1, #20] <== NOT EXECUTED
rtems_chain_prepend (chain, &req->next_prio);
} else {
AIO_printf ("Add by priority \n");
int prio = ((rtems_aio_request *) node)->aiocbp->aio_reqprio;
while (req->aiocbp->aio_reqprio > prio &&
6214: e1500001 cmp r0, r1
6218: da000001 ble 6224 <rtems_aio_insert_prio+0x44>
621c: e1520003 cmp r2, r3 <== NOT EXECUTED
6220: 1afffff8 bne 6208 <rtems_aio_insert_prio+0x28> <== NOT EXECUTED
RTEMS_INLINE_ROUTINE void rtems_chain_insert(
rtems_chain_node *after_node,
rtems_chain_node *the_node
)
{
_Chain_Insert( after_node, the_node );
6224: e5920004 ldr r0, [r2, #4]
6228: e1a0100c mov r1, ip
622c: ea0008ab b 84e0 <_Chain_Insert>
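rtems_aio_insert_prio above keeps each fd chain ordered by aio_reqprio: it advances past every queued request whose priority value is lower than the new one and splices the new request in before the node it stopped at (prepending when the chain is empty). A minimal sketch of that ordered insertion over a sentinel-based doubly linked list; the node and list types are illustrative, not the RTEMS chain structures:

#include <stddef.h>

struct prio_node {
  struct prio_node *next;
  struct prio_node *prev;
  int priority;
};

struct prio_list {
  struct prio_node head;  /* sentinel: the list is empty when it points to itself */
};

static void prio_list_init(struct prio_list *list)
{
  list->head.next = &list->head;
  list->head.prev = &list->head;
}

static void prio_list_insert(struct prio_list *list, struct prio_node *node)
{
  struct prio_node *iter = list->head.next;

  /* Stop at the sentinel or at the first node that does not sort before us. */
  while (iter != &list->head && node->priority > iter->priority)
    iter = iter->next;

  /* Splice 'node' in just before 'iter'. */
  node->next       = iter;
  node->prev       = iter->prev;
  iter->prev->next = node;
  iter->prev       = node;
}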
00006278 <rtems_aio_remove_req>:
* AIO_NOTCANCELED - if request was not canceled
* AIO_CANCELED - if request was canceled
*/
int rtems_aio_remove_req (rtems_chain_control *chain, struct aiocb *aiocbp)
{
6278: e92d4010 push {r4, lr}
}
}
AIO_printf ("Thread finished\n");
return NULL;
}
627c: e1a03000 mov r3, r0
6280: e4934004 ldr r4, [r3], #4
* AIO_CANCELED - if request was canceled
*/
int rtems_aio_remove_req (rtems_chain_control *chain, struct aiocb *aiocbp)
{
if (rtems_chain_is_empty (chain))
6284: e1540003 cmp r4, r3
return AIO_ALLDONE;
6288: 03a00002 moveq r0, #2
* AIO_CANCELED - if request was canceled
*/
int rtems_aio_remove_req (rtems_chain_control *chain, struct aiocb *aiocbp)
{
if (rtems_chain_is_empty (chain))
628c: 1a000003 bne 62a0 <rtems_aio_remove_req+0x28>
6290: e8bd8010 pop {r4, pc}
}
}
AIO_printf ("Thread finished\n");
return NULL;
}
6294: e5904000 ldr r4, [r0] <== NOT EXECUTED
rtems_chain_node *node = rtems_chain_first (chain);
rtems_aio_request *current;
current = (rtems_aio_request *) node;
while (!rtems_chain_is_tail (chain, node) && current->aiocbp != aiocbp) {
6298: e1540003 cmp r4, r3 <== NOT EXECUTED
629c: 0a00000d beq 62d8 <rtems_aio_remove_req+0x60> <== NOT EXECUTED
62a0: e5942014 ldr r2, [r4, #20]
62a4: e1520001 cmp r2, r1
node = rtems_chain_next (node);
current = (rtems_aio_request *) node;
62a8: e1a00004 mov r0, r4
rtems_chain_node *node = rtems_chain_first (chain);
rtems_aio_request *current;
current = (rtems_aio_request *) node;
while (!rtems_chain_is_tail (chain, node) && current->aiocbp != aiocbp) {
62ac: 1afffff8 bne 6294 <rtems_aio_remove_req+0x1c>
62b0: eb000875 bl 848c <_Chain_Extract>
if (rtems_chain_is_tail (chain, node))
return AIO_NOTCANCELED;
else
{
rtems_chain_extract (node);
current->aiocbp->error_code = ECANCELED;
62b4: e5943014 ldr r3, [r4, #20]
62b8: e3a0208c mov r2, #140 ; 0x8c
62bc: e5832030 str r2, [r3, #48] ; 0x30
current->aiocbp->return_value = -1;
62c0: e3e02000 mvn r2, #0
free (current);
62c4: e1a00004 mov r0, r4
return AIO_NOTCANCELED;
else
{
rtems_chain_extract (node);
current->aiocbp->error_code = ECANCELED;
current->aiocbp->return_value = -1;
62c8: e5832034 str r2, [r3, #52] ; 0x34
free (current);
62cc: ebfff28a bl 2cfc <free>
}
return AIO_CANCELED;
62d0: e3a00000 mov r0, #0
62d4: e8bd8010 pop {r4, pc}
node = rtems_chain_next (node);
current = (rtems_aio_request *) node;
}
if (rtems_chain_is_tail (chain, node))
return AIO_NOTCANCELED;
62d8: e3a00001 mov r0, #1 <== NOT EXECUTED
current->aiocbp->return_value = -1;
free (current);
}
return AIO_CANCELED;
}
62dc: e8bd8010 pop {r4, pc} <== NOT EXECUTED
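rtems_aio_remove_req above returns AIO_ALLDONE when the chain is empty, walks the chain looking for the node whose aiocbp matches, returns AIO_NOTCANCELED when the walk reaches the tail, and otherwise extracts the node, records ECANCELED and a -1 return value in the control block, and frees the request. A minimal sketch of that search-and-cancel walk over the same kind of sentinel list as the sketch above; the request and control-block layouts are illustrative:

#include <errno.h>
#include <stdlib.h>

enum { REQ_CANCELED = 0, REQ_NOTCANCELED = 1, REQ_ALLDONE = 2 };

struct aiocb_stub {
  int error_code;
  int return_value;
};

struct aio_req {
  struct aio_req    *next;
  struct aio_req    *prev;
  struct aiocb_stub *aiocbp;
};

/* 'head' is the sentinel node; queued requests are assumed heap allocated. */
static int remove_req(struct aio_req *head, struct aiocb_stub *aiocbp)
{
  struct aio_req *node = head->next;

  if (node == head)
    return REQ_ALLDONE;                  /* nothing queued at all */

  while (node != head && node->aiocbp != aiocbp)
    node = node->next;

  if (node == head)
    return REQ_NOTCANCELED;              /* request not found on this chain */

  node->prev->next = node->next;         /* unlink the request */
  node->next->prev = node->prev;
  node->aiocbp->error_code   = ECANCELED;
  node->aiocbp->return_value = -1;
  free(node);
  return REQ_CANCELED;
}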
0000ca44 <rtems_task_mode>:
rtems_status_code rtems_task_mode(
rtems_mode mode_set,
rtems_mode mask,
rtems_mode *previous_mode_set
)
{
ca44: e92d47f0 push {r4, r5, r6, r7, r8, r9, sl, lr}
ASR_Information *asr;
bool is_asr_enabled = false;
bool needs_asr_dispatching = false;
rtems_mode old_mode;
if ( !previous_mode_set )
ca48: e252a000 subs sl, r2, #0
rtems_status_code rtems_task_mode(
rtems_mode mode_set,
rtems_mode mask,
rtems_mode *previous_mode_set
)
{
ca4c: e1a04000 mov r4, r0
ca50: e1a05001 mov r5, r1
bool is_asr_enabled = false;
bool needs_asr_dispatching = false;
rtems_mode old_mode;
if ( !previous_mode_set )
return RTEMS_INVALID_ADDRESS;
ca54: 03a00009 moveq r0, #9
ASR_Information *asr;
bool is_asr_enabled = false;
bool needs_asr_dispatching = false;
rtems_mode old_mode;
if ( !previous_mode_set )
ca58: 08bd87f0 popeq {r4, r5, r6, r7, r8, r9, sl, pc}
return RTEMS_INVALID_ADDRESS;
executing = _Thread_Executing;
ca5c: e59f313c ldr r3, [pc, #316] ; cba0 <rtems_task_mode+0x15c>
ca60: e5937004 ldr r7, [r3, #4]
api = executing->API_Extensions[ THREAD_API_RTEMS ];
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
ca64: e5d78074 ldrb r8, [r7, #116] ; 0x74
if ( !previous_mode_set )
return RTEMS_INVALID_ADDRESS;
executing = _Thread_Executing;
api = executing->API_Extensions[ THREAD_API_RTEMS ];
ca68: e59760f8 ldr r6, [r7, #248] ; 0xf8
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
ca6c: e597307c ldr r3, [r7, #124] ; 0x7c
executing = _Thread_Executing;
api = executing->API_Extensions[ THREAD_API_RTEMS ];
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
ca70: e3580000 cmp r8, #0
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
ca74: e5d69008 ldrb r9, [r6, #8]
executing = _Thread_Executing;
api = executing->API_Extensions[ THREAD_API_RTEMS ];
asr = &api->Signal;
old_mode = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;
ca78: 03a08c01 moveq r8, #256 ; 0x100
ca7c: 13a08000 movne r8, #0
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
ca80: e3530000 cmp r3, #0
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
ca84: 13888c02 orrne r8, r8, #512 ; 0x200
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
ca88: e3590000 cmp r9, #0
ca8c: 03a09b01 moveq r9, #1024 ; 0x400
ca90: 13a09000 movne r9, #0
old_mode |= _ISR_Get_level();
ca94: ebffefe6 bl 8a34 <_CPU_ISR_Get_level>
if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
old_mode |= RTEMS_NO_TIMESLICE;
else
old_mode |= RTEMS_TIMESLICE;
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
ca98: e1899000 orr r9, r9, r0
old_mode |= _ISR_Get_level();
ca9c: e1898008 orr r8, r9, r8
*previous_mode_set = old_mode;
/*
* These are generic thread scheduling characteristics.
*/
if ( mask & RTEMS_PREEMPT_MASK )
caa0: e3150c01 tst r5, #256 ; 0x100
old_mode |= RTEMS_TIMESLICE;
old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
old_mode |= _ISR_Get_level();
*previous_mode_set = old_mode;
caa4: e58a8000 str r8, [sl]
/*
* These are generic thread scheduling characteristics.
*/
if ( mask & RTEMS_PREEMPT_MASK )
caa8: 0a000003 beq cabc <rtems_task_mode+0x78>
executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;
caac: e3140c01 tst r4, #256 ; 0x100
cab0: 13a03000 movne r3, #0
cab4: 03a03001 moveq r3, #1
cab8: e5c73074 strb r3, [r7, #116] ; 0x74
if ( mask & RTEMS_TIMESLICE_MASK ) {
cabc: e3150c02 tst r5, #512 ; 0x200
cac0: 0a000006 beq cae0 <rtems_task_mode+0x9c>
if ( _Modes_Is_timeslice(mode_set) ) {
cac4: e2143c02 ands r3, r4, #512 ; 0x200
executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
cac8: 13a03001 movne r3, #1
cacc: 1587307c strne r3, [r7, #124] ; 0x7c
executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
cad0: 159f30cc ldrne r3, [pc, #204] ; cba4 <rtems_task_mode+0x160>
cad4: 15933000 ldrne r3, [r3]
} else
executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
cad8: 0587307c streq r3, [r7, #124] ; 0x7c
executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;
if ( mask & RTEMS_TIMESLICE_MASK ) {
if ( _Modes_Is_timeslice(mode_set) ) {
executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
cadc: 15873078 strne r3, [r7, #120] ; 0x78
}
/*
* Set the new interrupt level
*/
if ( mask & RTEMS_INTERRUPT_MASK )
cae0: e3150080 tst r5, #128 ; 0x80
cae4: 0a000001 beq caf0 <rtems_task_mode+0xac>
*/
RTEMS_INLINE_ROUTINE void _Modes_Set_interrupt_level (
Modes_Control mode_set
)
{
_ISR_Set_level( _Modes_Get_interrupt_level( mode_set ) );
cae8: e2040080 and r0, r4, #128 ; 0x80
caec: ebffefcb bl 8a20 <_CPU_ISR_Set_level>
* This is specific to the RTEMS API
*/
is_asr_enabled = false;
needs_asr_dispatching = false;
if ( mask & RTEMS_ASR_MASK ) {
caf0: e2150b01 ands r0, r5, #1024 ; 0x400
caf4: 0a000013 beq cb48 <rtems_task_mode+0x104>
* Output:
* *previous_mode_set - previous mode set
* always return RTEMS_SUCCESSFUL;
*/
rtems_status_code rtems_task_mode(
caf8: e3140b01 tst r4, #1024 ; 0x400
is_asr_enabled = false;
needs_asr_dispatching = false;
if ( mask & RTEMS_ASR_MASK ) {
is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
if ( is_asr_enabled != asr->is_enabled ) {
cafc: e5d62008 ldrb r2, [r6, #8]
* Output:
* *previous_mode_set - previous mode set
* always return RTEMS_SUCCESSFUL;
*/
rtems_status_code rtems_task_mode(
cb00: 13a03000 movne r3, #0
cb04: 03a03001 moveq r3, #1
is_asr_enabled = false;
needs_asr_dispatching = false;
if ( mask & RTEMS_ASR_MASK ) {
is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
if ( is_asr_enabled != asr->is_enabled ) {
cb08: e1520003 cmp r2, r3
/*
* This is specific to the RTEMS API
*/
is_asr_enabled = false;
needs_asr_dispatching = false;
cb0c: 03a00000 moveq r0, #0
if ( mask & RTEMS_ASR_MASK ) {
is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
if ( is_asr_enabled != asr->is_enabled ) {
cb10: 0a00000c beq cb48 <rtems_task_mode+0x104>
asr->is_enabled = is_asr_enabled;
cb14: e5c63008 strb r3, [r6, #8]
static inline uint32_t arm_interrupt_disable( void )
{
uint32_t arm_switch_reg;
uint32_t level;
asm volatile (
cb18: e10f3000 mrs r3, CPSR
cb1c: e3832080 orr r2, r3, #128 ; 0x80
cb20: e129f002 msr CPSR_fc, r2
{
rtems_signal_set _signals;
ISR_Level _level;
_ISR_Disable( _level );
_signals = information->signals_pending;
cb24: e2861014 add r1, r6, #20
cb28: e8910006 ldm r1, {r1, r2}
information->signals_pending = information->signals_posted;
information->signals_posted = _signals;
cb2c: e5862014 str r2, [r6, #20]
rtems_signal_set _signals;
ISR_Level _level;
_ISR_Disable( _level );
_signals = information->signals_pending;
information->signals_pending = information->signals_posted;
cb30: e5861018 str r1, [r6, #24]
static inline void arm_interrupt_enable( uint32_t level )
{
ARM_SWITCH_REGISTERS;
asm volatile (
cb34: e129f003 msr CPSR_fc, r3
_ASR_Swap_signals( asr );
if ( _ASR_Are_signals_pending( asr ) ) {
cb38: e5960014 ldr r0, [r6, #20]
/*
* This is specific to the RTEMS API
*/
is_asr_enabled = false;
needs_asr_dispatching = false;
cb3c: e3500000 cmp r0, #0
cb40: 13a00001 movne r0, #1
cb44: 03a00000 moveq r0, #0
needs_asr_dispatching = true;
}
}
}
if ( _System_state_Is_up( _System_state_Get() ) ) {
cb48: e59f3058 ldr r3, [pc, #88] ; cba8 <rtems_task_mode+0x164>
cb4c: e5933000 ldr r3, [r3]
cb50: e3530003 cmp r3, #3
cb54: 1a00000f bne cb98 <rtems_task_mode+0x154>
bool are_signals_pending
)
{
Thread_Control *executing;
executing = _Thread_Executing;
cb58: e59f2040 ldr r2, [pc, #64] ; cba0 <rtems_task_mode+0x15c>
if ( are_signals_pending ||
cb5c: e3500000 cmp r0, #0
bool are_signals_pending
)
{
Thread_Control *executing;
executing = _Thread_Executing;
cb60: e5923004 ldr r3, [r2, #4]
if ( are_signals_pending ||
cb64: 1a000005 bne cb80 <rtems_task_mode+0x13c>
cb68: e5922008 ldr r2, [r2, #8]
cb6c: e1530002 cmp r3, r2
cb70: 08bd87f0 popeq {r4, r5, r6, r7, r8, r9, sl, pc}
(!_Thread_Is_heir( executing ) && executing->is_preemptible) ) {
cb74: e5d33074 ldrb r3, [r3, #116] ; 0x74
cb78: e3530000 cmp r3, #0
cb7c: 08bd87f0 popeq {r4, r5, r6, r7, r8, r9, sl, pc}
_Thread_Dispatch_necessary = true;
cb80: e59f3018 ldr r3, [pc, #24] ; cba0 <rtems_task_mode+0x15c>
cb84: e3a02001 mov r2, #1
cb88: e5c32010 strb r2, [r3, #16]
if (_Thread_Evaluate_is_dispatch_needed( needs_asr_dispatching ) )
_Thread_Dispatch();
cb8c: ebffea23 bl 7420 <_Thread_Dispatch>
}
return RTEMS_SUCCESSFUL;
cb90: e3a00000 mov r0, #0
cb94: e8bd87f0 pop {r4, r5, r6, r7, r8, r9, sl, pc}
cb98: e3a00000 mov r0, #0 <== NOT EXECUTED
}
cb9c: e8bd87f0 pop {r4, r5, r6, r7, r8, r9, sl, pc} <== NOT EXECUTED
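rtems_task_mode above assembles the previous mode word bit by bit (preemption, timeslicing, ASR enable, interrupt level) and then applies only the portions of mode_set selected by mask, finally forcing a dispatch when the ASR change left signals pending. A minimal sketch of that mask-controlled read-modify-write on a mode word; the flag values and the task_state fields are illustrative, not the RTEMS mode encoding:

#include <stdbool.h>
#include <stdint.h>

#define MODE_NO_PREEMPT  0x00000100u   /* illustrative bit assignments */
#define MODE_TIMESLICE   0x00000200u
#define MODE_NO_ASR      0x00000400u

struct task_state {
  bool preemptible;
  bool timeslicing;
  bool asr_enabled;
};

/* Returns the previous mode; changes only the bits selected by 'mask'. */
static uint32_t task_mode_update(struct task_state *t,
                                 uint32_t mode_set, uint32_t mask)
{
  uint32_t old_mode = 0;
  if (!t->preemptible) old_mode |= MODE_NO_PREEMPT;
  if (t->timeslicing)  old_mode |= MODE_TIMESLICE;
  if (!t->asr_enabled) old_mode |= MODE_NO_ASR;

  if (mask & MODE_NO_PREEMPT)
    t->preemptible = (mode_set & MODE_NO_PREEMPT) == 0;
  if (mask & MODE_TIMESLICE)
    t->timeslicing = (mode_set & MODE_TIMESLICE) != 0;
  if (mask & MODE_NO_ASR)
    t->asr_enabled = (mode_set & MODE_NO_ASR) == 0;

  return old_mode;
}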
00005b84 <sigaction>:
struct sigaction *oact
)
{
ISR_Level level;
if ( oact )
5b84: e2523000 subs r3, r2, #0
*oact = _POSIX_signals_Vectors[ sig ];
5b88: 159f20b8 ldrne r2, [pc, #184] ; 5c48 <sigaction+0xc4>
int sigaction(
int sig,
const struct sigaction *act,
struct sigaction *oact
)
{
5b8c: e92d40f0 push {r4, r5, r6, r7, lr}
5b90: e1a05001 mov r5, r1
ISR_Level level;
if ( oact )
*oact = _POSIX_signals_Vectors[ sig ];
5b94: 13a0100c movne r1, #12
5b98: 10222091 mlane r2, r1, r0, r2
int sigaction(
int sig,
const struct sigaction *act,
struct sigaction *oact
)
{
5b9c: e1a04000 mov r4, r0
ISR_Level level;
if ( oact )
*oact = _POSIX_signals_Vectors[ sig ];
5ba0: 18920007 ldmne r2, {r0, r1, r2}
5ba4: 18830007 stmne r3, {r0, r1, r2}
if ( !sig )
5ba8: e3540000 cmp r4, #0
5bac: 0a000004 beq 5bc4 <sigaction+0x40>
static inline bool is_valid_signo(
int signo
)
{
return ((signo) >= 1 && (signo) <= 32 );
5bb0: e2443001 sub r3, r4, #1
rtems_set_errno_and_return_minus_one( EINVAL );
if ( !is_valid_signo(sig) )
5bb4: e353001f cmp r3, #31
5bb8: 8a000001 bhi 5bc4 <sigaction+0x40>
*
* NOTE: Solaris documentation claims to "silently enforce" this which
* contradicts the POSIX specification.
*/
if ( sig == SIGKILL )
5bbc: e3540009 cmp r4, #9
5bc0: 1a000004 bne 5bd8 <sigaction+0x54>
rtems_set_errno_and_return_minus_one( EINVAL );
5bc4: eb002154 bl e11c <__errno>
5bc8: e3a03016 mov r3, #22
5bcc: e5803000 str r3, [r0]
5bd0: e3e00000 mvn r0, #0
5bd4: e8bd80f0 pop {r4, r5, r6, r7, pc}
/*
* Evaluate the new action structure and set the global signal vector
* appropriately.
*/
if ( act ) {
5bd8: e3550000 cmp r5, #0
5bdc: 0a000017 beq 5c40 <sigaction+0xbc>
static inline uint32_t arm_interrupt_disable( void )
{
uint32_t arm_switch_reg;
uint32_t level;
asm volatile (
5be0: e10f6000 mrs r6, CPSR
5be4: e3863080 orr r3, r6, #128 ; 0x80
5be8: e129f003 msr CPSR_fc, r3
* Unless the user is installing the default signal actions, then
* we can just copy the provided sigaction structure into the vectors.
*/
_ISR_Disable( level );
if ( act->sa_handler == SIG_DFL ) {
5bec: e5953008 ldr r3, [r5, #8]
5bf0: e3530000 cmp r3, #0
5bf4: e59f704c ldr r7, [pc, #76] ; 5c48 <sigaction+0xc4>
5bf8: 1a000007 bne 5c1c <sigaction+0x98>
_POSIX_signals_Vectors[ sig ] = _POSIX_signals_Default_vectors[ sig ];
5bfc: e283300c add r3, r3, #12
5c00: e0040493 mul r4, r3, r4
5c04: e59f2040 ldr r2, [pc, #64] ; 5c4c <sigaction+0xc8>
5c08: e0873004 add r3, r7, r4
5c0c: e0824004 add r4, r2, r4
5c10: e8940007 ldm r4, {r0, r1, r2}
5c14: e8830007 stm r3, {r0, r1, r2}
5c18: ea000005 b 5c34 <sigaction+0xb0>
} else {
_POSIX_signals_Clear_process_signals( sig );
5c1c: e1a00004 mov r0, r4
5c20: eb0015b2 bl b2f0 <_POSIX_signals_Clear_process_signals>
_POSIX_signals_Vectors[ sig ] = *act;
5c24: e3a0300c mov r3, #12
5c28: e0247493 mla r4, r3, r4, r7
5c2c: e8950007 ldm r5, {r0, r1, r2}
5c30: e8840007 stm r4, {r0, r1, r2}
static inline void arm_interrupt_enable( uint32_t level )
{
ARM_SWITCH_REGISTERS;
asm volatile (
5c34: e129f006 msr CPSR_fc, r6
* now (signals not posted when SIG_IGN).
* + If we are now ignoring a signal that was previously pending,
* we clear the pending signal indicator.
*/
return 0;
5c38: e3a00000 mov r0, #0
5c3c: e8bd80f0 pop {r4, r5, r6, r7, pc}
5c40: e1a00005 mov r0, r5
}
5c44: e8bd80f0 pop {r4, r5, r6, r7, pc}
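sigaction above copies out the old vector when oact is given, rejects sig 0, signal numbers outside 1..32 and SIGKILL with EINVAL, and installs the new action (or the default one) with interrupts disabled around the vector update. A minimal sketch of the up-front validation, returning -1 with errno set as the listing's rtems_set_errno_and_return_minus_one does; the helper name is illustrative:

#include <errno.h>
#include <signal.h>

/* Accepts the 1..32 range used by is_valid_signo above and refuses SIGKILL. */
static int validate_signo(int sig)
{
  if (sig < 1 || sig > 32 || sig == SIGKILL) {
    errno = EINVAL;
    return -1;
  }
  return 0;
}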
000081ec <sigwait>:
int sigwait(
const sigset_t *set,
int *sig
)
{
81ec: e92d4010 push {r4, lr}
81f0: e1a04001 mov r4, r1
int status;
status = sigtimedwait( set, NULL, NULL );
81f4: e3a01000 mov r1, #0
81f8: e1a02001 mov r2, r1
81fc: ebffff84 bl 8014 <sigtimedwait>
if ( status != -1 ) {
8200: e3700001 cmn r0, #1
8204: 0a000004 beq 821c <sigwait+0x30>
if ( sig )
8208: e3540000 cmp r4, #0
*sig = status;
820c: 15840000 strne r0, [r4]
return 0;
8210: 13a00000 movne r0, #0
int status;
status = sigtimedwait( set, NULL, NULL );
if ( status != -1 ) {
if ( sig )
8214: 18bd8010 popne {r4, pc}
8218: ea000002 b 8228 <sigwait+0x3c> <== NOT EXECUTED
*sig = status;
return 0;
}
return errno;
821c: eb002082 bl 1042c <__errno>
8220: e5900000 ldr r0, [r0]
8224: e8bd8010 pop {r4, pc}
status = sigtimedwait( set, NULL, NULL );
if ( status != -1 ) {
if ( sig )
*sig = status;
return 0;
8228: e1a00004 mov r0, r4 <== NOT EXECUTED
}
return errno;
}
822c: e8bd8010 pop {r4, pc} <== NOT EXECUTED
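sigwait above is a thin wrapper over sigtimedwait(set, NULL, NULL): a successful wait stores the returned signal number through sig and yields 0, while a failure is reported by returning the errno value itself, as POSIX requires for sigwait. A minimal sketch of that wrapper under its own name:

#include <errno.h>
#include <signal.h>
#include <stddef.h>

static int sigwait_sketch(const sigset_t *set, int *sig)
{
  int status = sigtimedwait(set, NULL, NULL);  /* NULL timeout: block indefinitely */

  if (status != -1) {
    if (sig != NULL)
      *sig = status;      /* sigtimedwait returns the delivered signal number */
    return 0;
  }
  return errno;           /* errors are returned by value, not via errno */
}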