Bug Summary

File: /home/joel/rtems-4.11-work/build/rtems/c/src/../../cpukit/score/src/heap.c
Location: line 419, column 5
Description: Value stored to 'block_begin' is never read
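
The assignment flagged at line 419 sits in the merge branch of _Heap_Block_allocate_from_end(): once the block is coalesced with its free predecessor, block_begin is recomputed, but its only read (computing block_end at line 391) happens earlier, so the new value is never used. The store is harmless dead code and can simply be deleted. Below is a distilled, self-contained sketch of the pattern (hypothetical names, not from heap.c):

#include <stdint.h>
#include <stdio.h>

static uintptr_t pattern( char *block, uintptr_t size,
                          char *prev, uintptr_t prev_size, int merge )
{
  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_end   = block_begin + size;  /* the only read of block_begin */

  if ( merge ) {
    block       = prev;
    block_begin = (uintptr_t) block;  /* dead store: never read afterwards */
    size       += prev_size;
  }

  /* block and size stay live below; block_begin does not */
  printf( "%p %lu\n", (void *) block, (unsigned long) size );
  return block_end;
}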

Annotated Source Code

1 /**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9 /*
10 * COPYRIGHT (c) 1989-2009.
11 * On-Line Applications Research Corporation (OAR).
12 *
13 * Copyright (c) 2009, 2010 embedded brains GmbH.
14 *
15 * The license and distribution terms for this file may be
16 * found in the file LICENSE in this distribution or at
17 * http://www.rtems.com/license/LICENSE.
18 *
19 * $Id: heap.c,v 1.39 2010/08/25 12:35:52 sh Exp $
20 */
21
22 #if HAVE_CONFIG_H
23 #include "config.h"
24 #endif
25
26 #include <string.h>
27
28 #include <rtems/system.h>
29 #include <rtems/score/heap.h>
30 #include <rtems/score/interr.h>
31
32 #if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
33 #error "invalid CPU_ALIGNMENT value"
34 #endif
35
36 static uint32_t instance = 0;
37
38 /*PAGE
39 *
40 * _Heap_Initialize
41 *
42 * This kernel routine initializes a heap.
43 *
44 * Input parameters:
45 * heap - pointer to heap header
46 * area_begin - starting address of heap
47 * size - size of heap
48 * page_size - allocatable unit of memory
49 *
50 * Output parameters:
51 * returns - maximum memory available if RTEMS_SUCCESSFUL
52 * 0 - otherwise
53 *
54 * This is what a heap looks like in memory immediately after initialization:
55 *
56 *
57 * +--------------------------------+ <- begin = area_begin
58 * | unused space due to alignment |
59 * | size < page_size |
60 * 0 +--------------------------------+ <- first block
61 * | prev_size = page_size |
62 * 4 +--------------------------------+
63 * | size = size0 | 1 |
64 * 8 +---------------------+----------+ <- aligned on page_size
65 * | next = HEAP_TAIL | |
66 * 12 +---------------------+ |
67 * | prev = HEAP_HEAD | memory |
68 * +---------------------+ |
69 * | available |
70 * | |
71 * | for allocation |
72 * | |
73 * size0 +--------------------------------+ <- last dummy block
74 * | prev_size = size0 |
75 * +4 +--------------------------------+
76 * | size = page_size | 0 | <- prev block is free
77 * +8 +--------------------------------+ <- aligned on page_size
78 * | unused space due to alignment |
79 * | size < page_size |
80 * +--------------------------------+ <- end = begin + size
81 *
82 * Below is what a heap looks like after the first allocation of SIZE bytes
83 * using _Heap_Allocate(). BSIZE stands for SIZE + 4 aligned up to a
84 * 'page_size' boundary.
85 * [NOTE: If the allocation were performed by _Heap_Allocate_aligned(), the
86 * block size BSIZE would be defined differently, and the previously free
87 * block would be split so that its upper part becomes the used block (see
88 * 'heapallocatealigned.c' for details).]
89 *
90 * +--------------------------------+ <- begin = area_begin
91 * | unused space due to alignment |
92 * | size < page_size |
93 * 0 +--------------------------------+ <- used block
94 * | prev_size = page_size |
95 * 4 +--------------------------------+
96 * | size = BSIZE | 1 | <- prev block is used
97 * 8 +--------------------------------+ <- aligned on page_size
98 * | . | Pointer returned to the user
99 * | . | is 8 for _Heap_Allocate()
100 * | . | and is in range
101 * 8 + | user-accessible | [8,8+page_size) for
102 * page_size +- - - - - -+ _Heap_Allocate_aligned()
103 * | area |
104 * | . |
105 * BSIZE +- - - - - . - - - - -+ <- free block
106 * | . |
107 * BSIZE +4 +--------------------------------+
108 * | size = S = size0 - BSIZE | 1 | <- prev block is used
109 * BSIZE +8 +-------------------+------------+ <- aligned on page_size
110 * | next = HEAP_TAIL | |
111 * BSIZE +12 +-------------------+ |
112 * | prev = HEAP_HEAD | memory |
113 * +-------------------+ |
114 * | . available |
115 * | . |
116 * | . for |
117 * | . |
118 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
119 * | prev_size = S | |
120 * +S+4 +-------------------+------------+
121 * | size = page_size | 0 | <- prev block is free
122 * +S+8 +--------------------------------+ <- aligned on page_size
123 * | unused space due to alignment |
124 * | size < page_size |
125 * +--------------------------------+ <- end = begin + size
126 *
127 */
128
129 #ifdef HEAP_PROTECTION
130 static void _Heap_Protection_block_initialize_default(
131 Heap_Control *heap,
132 Heap_Block *block
133 )
134 {
135 block->Protection_begin.protector [0] = HEAP_BEGIN_PROTECTOR_0;
136 block->Protection_begin.protector [1] = HEAP_BEGIN_PROTECTOR_1;
137 block->Protection_begin.next_delayed_free_block = NULL;
138 block->Protection_begin.task = _Thread_Executing;
139 block->Protection_begin.tag = NULL;
140 block->Protection_end.protector [0] = HEAP_END_PROTECTOR_0;
141 block->Protection_end.protector [1] = HEAP_END_PROTECTOR_1;
142 }
143
144 static void _Heap_Protection_block_check_default(
145 Heap_Control *heap,
146 Heap_Block *block
147 )
148 {
149 if (
150 block->Protection_begin.protector [0] != HEAP_BEGIN_PROTECTOR_0
151 || block->Protection_begin.protector [1] != HEAP_BEGIN_PROTECTOR_1
152 || block->Protection_end.protector [0] != HEAP_END_PROTECTOR_0
153 || block->Protection_end.protector [1] != HEAP_END_PROTECTOR_1
154 ) {
155 _Heap_Protection_block_error( heap, block );
156 }
157 }
158
159 static void _Heap_Protection_block_error_default(
160 Heap_Control *heap,
161 Heap_Block *block
162 )
163 {
164 /* FIXME */
165 _Internal_error_Occurred( 0xdeadbeef, false, 0xdeadbeef );
166 }
167 #endif
168
169 bool _Heap_Get_first_and_last_block(
170 uintptr_t heap_area_begin,
171 uintptr_t heap_area_size,
172 uintptr_t page_size,
173 uintptr_t min_block_size,
174 Heap_Block **first_block_ptr,
175 Heap_Block **last_block_ptr
176 )
177 {
178 uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
179 uintptr_t const alloc_area_begin =
180 _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
181 uintptr_t const first_block_begin =
182 alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
183 uintptr_t const overhead =
184 HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
185 uintptr_t const first_block_size =
186 _Heap_Align_down( heap_area_size - overhead, page_size );
187 Heap_Block *const first_block = (Heap_Block *) first_block_begin;
188 Heap_Block *const last_block =
189 _Heap_Block_at( first_block, first_block_size );
190
191 if (
192 heap_area_end < heap_area_begin
193 || heap_area_size <= overhead
194 || first_block_size < min_block_size
195 ) {
196 /* Invalid area or area too small */
197 return false;
198 }
199
200 *first_block_ptr = first_block;
201 *last_block_ptr = last_block;
202
203 return true;
204 }
205
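As a side note for readers checking the arithmetic in _Heap_Get_first_and_last_block() above (and the layout diagrams in the file header comment), the sketch below replays the computation outside of RTEMS. It is a hypothetical illustration assuming an ILP32 target, so HEAP_BLOCK_HEADER_SIZE is 2 * sizeof(uintptr_t) == 8, and the mask-based align helpers assume a power-of-two page size:

#include <stdint.h>
#include <stdio.h>

#define HDR ( 2 * sizeof( uintptr_t ) )  /* stands in for HEAP_BLOCK_HEADER_SIZE */

static uintptr_t align_up( uintptr_t v, uintptr_t a )   { return ( v + a - 1 ) & ~( a - 1 ); }
static uintptr_t align_down( uintptr_t v, uintptr_t a ) { return v & ~( a - 1 ); }

int main( void )
{
  uintptr_t const heap_area_begin = 0x1000;
  uintptr_t const heap_area_size  = 0x1000;
  uintptr_t const page_size       = 8;

  uintptr_t const alloc_area_begin  = align_up( heap_area_begin + HDR, page_size );
  uintptr_t const first_block_begin = alloc_area_begin - HDR;
  uintptr_t const overhead          = HDR + ( first_block_begin - heap_area_begin );
  uintptr_t const first_block_size  = align_down( heap_area_size - overhead, page_size );

  /* On ILP32: first block at 0x1000, size 0xff8, last block at 0x1ff8 */
  printf(
    "first = %#lx, size = %#lx, last = %#lx\n",
    (unsigned long) first_block_begin,
    (unsigned long) first_block_size,
    (unsigned long) ( first_block_begin + first_block_size )
  );
  return 0;
}
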
206 uintptr_t _Heap_Initialize(
207 Heap_Control *heap,
208 void *heap_area_begin_ptr,
209 uintptr_t heap_area_size,
210 uintptr_t page_size
211 )
212 {
213 Heap_Statistics *const stats = &heap->stats;
214 uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
215 uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
216 uintptr_t first_block_begin = 0;
217 uintptr_t first_block_size = 0;
218 uintptr_t last_block_begin = 0;
219 uintptr_t min_block_size = 0;
220 bool area_ok = false;
221 Heap_Block *first_block = NULL;
222 Heap_Block *last_block = NULL;
223
224 if ( page_size == 0 ) {
225 page_size = CPU_ALIGNMENT;
226 } else {
227 page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
228
229 if ( page_size < CPU_ALIGNMENT ) {
230 /* Integer overflow */
231 return 0;
232 }
233 }
234 min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
235
236 area_ok = _Heap_Get_first_and_last_block(
237 heap_area_begin,
238 heap_area_size,
239 page_size,
240 min_block_size,
241 &first_block,
242 &last_block
243 );
244 if ( !area_ok ) {
245 return 0;
246 }
247
248 memset(heap, 0, sizeof(*heap));
249
250 #ifdef HEAP_PROTECTION
251 heap->Protection.block_initialize = _Heap_Protection_block_initialize_default;
252 heap->Protection.block_check = _Heap_Protection_block_check_default;
253 heap->Protection.block_error = _Heap_Protection_block_error_default;
254 #endif
255
256 first_block_begin = (uintptr_t) first_block;
257 last_block_begin = (uintptr_t) last_block;
258 first_block_size = last_block_begin - first_block_begin;
259
260 /* First block */
261 first_block->prev_size = heap_area_end;
262 first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
263 first_block->next = _Heap_Free_list_tail( heap );
264 first_block->prev = _Heap_Free_list_head( heap );
265 _Heap_Protection_block_initialize( heap, first_block );
266
267 /* Heap control */
268 heap->page_size = page_size;
269 heap->min_block_size = min_block_size;
270 heap->area_begin = heap_area_begin;
271 heap->area_end = heap_area_end;
272 heap->first_block = first_block;
273 heap->last_block = last_block;
274 _Heap_Free_list_head( heap )->next = first_block;
275 _Heap_Free_list_tail( heap )->prev = first_block;
276
277 /* Last block */
278 last_block->prev_size = first_block_size;
279 last_block->size_and_flag = 0;
280 _Heap_Set_last_block_size( heap );
281 _Heap_Protection_block_initialize( heap, last_block );
282
283 /* Statistics */
284 stats->size = first_block_size;
285 stats->free_size = first_block_size;
286 stats->min_free_size = first_block_size;
287 stats->free_blocks = 1;
288 stats->max_free_blocks = 1;
289 stats->instance = instance++;
290
291 _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
292 _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
293 _HAssert(
294 _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
295 );
296 _HAssert(
297 _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
298 );
299
300 return first_block_size;
301 }
302
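For orientation, a minimal usage sketch of _Heap_Initialize() (assuming the RTEMS score headers are on the include path; the names below are hypothetical): a page_size of 0 selects CPU_ALIGNMENT (line 225 above), and a return value of 0 indicates an invalid or too small area.

#include <rtems/system.h>
#include <rtems/score/heap.h>

static Heap_Control example_heap;
static char example_area[ 2048 ];

static uintptr_t example_init( void )
{
  /* 0 requests the default page size, CPU_ALIGNMENT */
  uintptr_t const avail = _Heap_Initialize(
    &example_heap,
    example_area,
    sizeof( example_area ),
    0
  );

  /* avail == 0: area invalid or too small; otherwise the usable size */
  return avail;
}
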
303 static void _Heap_Block_split(
304 Heap_Control *heap,
305 Heap_Block *block,
306 Heap_Block *free_list_anchor,
307 uintptr_t alloc_size
308 )
309 {
310 Heap_Statistics *const stats = &heap->stats;
311
312 uintptr_t const page_size = heap->page_size;
313 uintptr_t const min_block_size = heap->min_block_size;
314 uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;
315
316 uintptr_t const block_size = _Heap_Block_size( block );
317
318 uintptr_t const used_size =
319 _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
320 uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );
321
322 uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
323 uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;
324
325 Heap_Block *next_block = _Heap_Block_at( block, block_size );
326
327 _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
328 _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );
329
330 if ( free_size >= free_size_limit ) {
331 Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
332 uintptr_t free_block_size = block_size - used_block_size;
333
334 _HAssert( used_block_size + free_block_size == block_size );
335
336 _Heap_Block_set_size( block, used_block_size );
337
338 /* Statistics */
339 stats->free_size += free_block_size;
340
341 if ( _Heap_Is_used( next_block ) ) {
342 _Heap_Free_list_insert_after( free_list_anchor, free_block );
343
344 /* Statistics */
345 ++stats->free_blocks;
346 } else {
347 uintptr_t const next_block_size = _Heap_Block_size( next_block );
348
349 _Heap_Free_list_replace( next_block, free_block );
350
351 free_block_size += next_block_size;
352
353 next_block = _Heap_Block_at( free_block, free_block_size );
354 }
355
356 free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
357
358 next_block->prev_size = free_block_size;
359 next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
360
361 _Heap_Protection_block_initialize( heap, free_block );
362 } else {
363 next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
364 }
365 }
366
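A worked example of the split decision in _Heap_Block_split() above, again assuming ILP32 (HEAP_BLOCK_HEADER_SIZE == 8, HEAP_ALLOC_BONUS == sizeof(uintptr_t) == 4) with page_size 8 and min_block_size 16; the input numbers are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main( void )
{
  uintptr_t const page_size      = 8;
  uintptr_t const min_block_size = 16;
  uintptr_t const header_size    = 8;  /* HEAP_BLOCK_HEADER_SIZE on ILP32 */
  uintptr_t const alloc_bonus    = 4;  /* HEAP_ALLOC_BONUS on ILP32 */

  uintptr_t const block_size = 64;     /* free block being allocated from */
  uintptr_t const alloc_size = 20;     /* user request */

  uintptr_t const min_alloc_size  = min_block_size - header_size;        /* 8 */
  uintptr_t const used_size       =
    ( alloc_size > min_alloc_size ? alloc_size : min_alloc_size )
    + header_size;                                                       /* 28 */
  uintptr_t const used_block_size =
    ( used_size + page_size - 1 ) & ~( page_size - 1 );                  /* 32 */
  uintptr_t const free_size       = block_size + alloc_bonus - used_size;   /* 40 */
  uintptr_t const free_size_limit = min_block_size + alloc_bonus;        /* 20 */

  if ( free_size >= free_size_limit ) {
    /* 40 >= 20, so the block is split: 32 bytes used, 32 bytes become free */
    printf( "split: used %lu, free %lu\n",
      (unsigned long) used_block_size,
      (unsigned long) ( block_size - used_block_size ) );
  } else {
    printf( "no split, whole block used\n" );
  }
  return 0;
}
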
367 static Heap_Block *_Heap_Block_allocate_from_begin(
368 Heap_Control *heap,
369 Heap_Block *block,
370 Heap_Block *free_list_anchor,
371 uintptr_t alloc_size
372 )
373 {
374 _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
375
376 return block;
377 }
378
379 static Heap_Block *_Heap_Block_allocate_from_end(
380 Heap_Control *heap,
381 Heap_Block *block,
382 Heap_Block *free_list_anchor,
383 uintptr_t alloc_begin,
384 uintptr_t alloc_size
385 )
386 {
387 Heap_Statistics *const stats = &heap->stats;
388
389 uintptr_t block_begin = (uintptr_t) block;
390 uintptr_t block_size = _Heap_Block_size( block );
391 uintptr_t block_end = block_begin + block_size;
392
393 Heap_Block *const new_block =
394 _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
395 uintptr_t const new_block_begin = (uintptr_t) new_block;
396 uintptr_t const new_block_size = block_end - new_block_begin;
397
398 block_end = new_block_begin;
399 block_size = block_end - block_begin;
400
401 _HAssert( block_size >= heap->min_block_size );
402 _HAssert( new_block_size >= heap->min_block_size );
403
404 /* Statistics */
405 stats->free_size += block_size;
406
407 if ( _Heap_Is_prev_used( block ) ) {
408 _Heap_Free_list_insert_after( free_list_anchor, block );
409
410 free_list_anchor = block;
411
412 /* Statistics */
413 ++stats->free_blocks;
414 } else {
415 Heap_Block *const prev_block = _Heap_Prev_block( block );
416 uintptr_t const prev_block_size = _Heap_Block_size( prev_block );
417
418 block = prev_block;
419 block_begin = (uintptr_t) block;
Value stored to 'block_begin' is never read
420 block_size += prev_block_size;
421 }
422
423 block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
424
425 new_block->prev_size = block_size;
426 new_block->size_and_flag = new_block_size;
427
428 _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );
429
430 return new_block;
431 }
432
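The entry point below picks between the two helpers: if the requested begin address falls within the first page of the block's allocation area (line 462), the allocation is carved from the beginning; otherwise it is taken from the end and the leading remainder is freed or merged. A small, hypothetical illustration of that test:

#include <stdint.h>
#include <stdio.h>

int main( void )
{
  uintptr_t const page_size        = 8;
  uintptr_t const alloc_area_begin = 0x1008;  /* hypothetical block's alloc area */

  /* Plain request: alloc_begin equals alloc_area_begin, offset 0 < 8 */
  uintptr_t alloc_begin = 0x1008;
  printf( "%s\n",
    alloc_begin - alloc_area_begin < page_size ? "from begin" : "from end" );

  /* 64-byte aligned request: offset 0x38 >= 8, allocate from the end */
  alloc_begin = 0x1040;
  printf( "%s\n",
    alloc_begin - alloc_area_begin < page_size ? "from begin" : "from end" );

  return 0;
}
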
433 Heap_Block *_Heap_Block_allocate(
434 Heap_Control *heap,
435 Heap_Block *block,
436 uintptr_t alloc_begin,
437 uintptr_t alloc_size
438 )
439 {
440 Heap_Statistics *const stats = &heap->stats;
441
442 uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
443 uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
444
445 Heap_Block *free_list_anchor = NULL;
446
447 _HAssert( alloc_area_begin <= alloc_begin );
448
449 if ( _Heap_Is_free( block ) ) {
450 free_list_anchor = block->prev;
451
452 _Heap_Free_list_remove( block );
453
454 /* Statistics */
455 --stats->free_blocks;
456 ++stats->used_blocks;
457 stats->free_size -= _Heap_Block_size( block );
458 } else {
459 free_list_anchor = _Heap_Free_list_head( heap );
460 }
461
462 if ( alloc_area_offset < heap->page_size ) {
463 alloc_size += alloc_area_offset;
464
465 block = _Heap_Block_allocate_from_begin(
466 heap,
467 block,
468 free_list_anchor,
469 alloc_size
470 );
471 } else {
472 block = _Heap_Block_allocate_from_end(
473 heap,
474 block,
475 free_list_anchor,
476 alloc_begin,
477 alloc_size
478 );
479 }
480
481 /* Statistics */
482 if ( stats->min_free_size > stats->free_size ) {
483 stats->min_free_size = stats->free_size;
484 }
485
486 _Heap_Protection_block_initialize( heap, block );
487
488 return block;
489 }
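
Suggested fix: since block_begin has no reader after the merge in _Heap_Block_allocate_from_end(), the assignment at line 419 can simply be dropped; block and block_size already carry the merged state forward. A sketch of the revised else branch:

  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_size += prev_block_size;
  }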