#include <rtems/test.h>

#define WAKEUP_EVENT RTEMS_EVENT_0

/* load context field (fragment) */
        volatile unsigned int *chunk;

/* measure runtime context fields (fragment) */
        size_t cache_line_size;
        volatile unsigned int *chunk;
static unsigned int
dirty_data_cache(volatile unsigned int *chunk, size_t chunk_size,
    size_t cache_line_size, unsigned int token)
{
        size_t m;
        size_t k;
        size_t i;

        m = chunk_size / sizeof(chunk[0]);
        k = cache_line_size / sizeof(chunk[0]);

        /* write one word per cache line so every line of the chunk is dirty */
        for (i = 0; i < m; i += k) {
                chunk[i] = token;
        }

        return token + 1;
}
/* load worker fragment: cache the context values, then dirty the chunk over and over */
        volatile unsigned int *chunk;
        /* ... */
        size_t cache_line_size;
        /* ... */
        chunk_size = ctx->chunk_size;
        cache_line_size = ctx->cache_line_size;
        /* ... */
        token = dirty_data_cache(chunk, chunk_size, cache_line_size, token);
        /* ... */
        token = dirty_data_cache(chunk, chunk_size, cache_line_size,
            token);
        /* destructor fragment: free per-worker chunks not shared with the context */
        for (load = 0; load < ctx->load_count; ++load) {
                lctx = &ctx->load_contexts[load];

                if (lctx->chunk != ctx->chunk) {
                        free(RTEMS_DEVOLATILE(unsigned int *, lctx->chunk));
                }
        }
static void *
add_offset(const volatile void *p, uintptr_t o)
{
        return (void *)((uintptr_t)p + o);
}

static void *
align_up(const volatile void *p, uintptr_t a)
{
        return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}
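/*
 * Worked example: align_up() rounds a pointer up to the next multiple of a
 * power-of-two alignment, e.g. align_up((void *)0x1003, 0x40) yields 0x1040
 * because (0x1003 + 0x3f) & ~0x3f == 0x1040.
 */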
/* T_measure_runtime_create() fragment: size the sample buffer, the dirty
   chunk, and the load contexts, then carve them out of one allocation */
        size_t cache_line_size;
        /* ... */
        sample_size = config->sample_count * sizeof(ctx->samples[0]);
        /* ... */
        if (cache_line_size == 0) {
                /* ... fall back to a small default line size ... */
        }
        /* ... */
        if (chunk_size == 0) {
                chunk_size = cache_line_size;
        }
        /* ... */
        load_size = load_count * sizeof(ctx->load_contexts[0]);
        /* ... */
        ctx = malloc(sizeof(*ctx) + sample_size + load_size + chunk_size +
            2 * cache_line_size);
        /* ... */
        ctx->sample_count = config->sample_count;
        ctx->samples = add_offset(ctx, sizeof(*ctx));
        ctx->cache_line_size = cache_line_size;
        ctx->chunk_size = chunk_size;
        ctx->chunk = add_offset(ctx->samples, sample_size);
        /* ... */
        ctx->load_count = load_count;
        ctx->load_contexts = add_offset(ctx->chunk, chunk_size);
        ctx->samples = align_up(ctx->samples, cache_line_size);
        ctx->chunk = align_up(ctx->chunk, cache_line_size);
        /* ... */
        memset(ctx->load_contexts, 0, load_size);
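/*
 * Resulting layout of the single allocation (sizes not to scale):
 *
 *   [ctx][samples ...][chunk ...][load_contexts ...]
 *
 * The extra 2 * cache_line_size requested from malloc() absorbs the two
 * align_up() adjustments of ctx->samples and ctx->chunk above.
 */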
        for (i = 0; i < load_count; ++i) {
                /* ... create one load worker task per processor ... */
                lctx = &ctx->load_contexts[i];
                /* ... */
                lctx->chunk = malloc(chunk_size);
                if (lctx->chunk == NULL) {
                        /* fall back to the shared chunk of the context */
                        lctx->chunk = ctx->chunk;
                }
                /* ... */
                CPU_SET((int)i, &cpu);
                /* ... bind the worker to processor i, start it ... */
                suspend_worker(lctx);
        }
        /* ... error path ... */
        destroy(&ctx->destructor);
        /* ... success path: register the destructor ... */
        T_add_destructor(&ctx->destructor, destroy);
static int
cmp(const void *ap, const void *bp)
{
        T_ticks a;
        T_ticks b;

        a = *(const T_ticks *)ap;
        b = *(const T_ticks *)bp;

        /* ascending order for qsort() */
        return (a > b) - (a < b);
}
static T_time
measure_variant_begin(const char *name, const char *variant)
{
        T_printf("M:B:%s\n", name);
        T_printf("M:V:%s\n", variant);

        return T_now();
}
static T_time
accumulate(const T_ticks *samples, size_t sample_count)
{
        T_time a = 0;
        size_t i;
        for (i = 0; i < sample_count; ++i) {
                a += T_ticks_to_time(samples[i]);
        }
        return a;
}
static T_ticks
median_absolute_deviation(T_ticks *samples, size_t sample_count)
{
        T_ticks median;
        size_t i;

        /* the samples are already sorted in ascending order */
        median = samples[sample_count / 2];

        /* replace each sample by its absolute deviation from the median */
        for (i = 0; i < sample_count / 2; ++i) {
                samples[i] = median - samples[i];
        }

        for (; i < sample_count; ++i) {
                samples[i] = samples[i] - median;
        }

        qsort(samples, sample_count, sizeof(samples[0]), cmp);
        return samples[sample_count / 2];
}
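/*
 * Worked example for median_absolute_deviation(): with the sorted samples
 * {1, 2, 4, 7, 9} the median is samples[2] == 4, the absolute deviations
 * are {3, 2, 0, 3, 5}, and after sorting them the middle element, 3, is
 * returned.
 */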
/* report_sorted_samples() fragment: print the sorted samples as
   "M:S:<count>:<value>" lines, merging consecutive samples which map to
   the same second and nanosecond values */
        const T_ticks *samples;
        /* ... */
        sample_count = ctx->sample_count;
        samples = ctx->samples;
        /* ... */
        for (i = 0; i < sample_count; ++i) {
                /* ... */
                T_time_to_seconds_and_nanoseconds(T_ticks_to_time(last),
                    &sa, &nsa);
                t = T_ticks_to_time(v);
                T_time_to_seconds_and_nanoseconds(t, &sb, &nsb);

                if (sa != sb || nsa != nsb) {
                        T_printf("M:S:%zu:%s\n", count,
                            T_time_to_string_ns(t, ts));
                        /* ... */
                }
                /* ... */
        }
        /* ... */
        T_printf("M:S:%zu:%s\n", count,
            T_ticks_to_string_ns(last, ts));
/* measure_variant_end() fragment: sort the samples and report the
   statistics of this variant */
        sample_count = ctx->sample_count;
        samples = ctx->samples;
        /* ... */
        a = accumulate(samples, sample_count);
        qsort(samples, sample_count, sizeof(samples[0]), cmp);
        T_printf("M:N:%zu\n", sample_count);

        if ((req->flags & T_MEASURE_RUNTIME_REPORT_SAMPLES) != 0) {
                report_sorted_samples(ctx);
        }

        v = samples[0];
        T_printf("M:MI:%s\n", T_ticks_to_string_ns(v, ts));    /* minimum */
        v = samples[(1 * sample_count) / 100];
        T_printf("M:P1:%s\n", T_ticks_to_string_ns(v, ts));    /* 1st percentile */
        v = samples[(1 * sample_count) / 4];
        T_printf("M:Q1:%s\n", T_ticks_to_string_ns(v, ts));    /* first quartile */
        v = samples[sample_count / 2];
        T_printf("M:Q2:%s\n", T_ticks_to_string_ns(v, ts));    /* median */
        v = samples[(3 * sample_count) / 4];
        T_printf("M:Q3:%s\n", T_ticks_to_string_ns(v, ts));    /* third quartile */
        v = samples[(99 * sample_count) / 100];
        T_printf("M:P99:%s\n", T_ticks_to_string_ns(v, ts));   /* 99th percentile */
        v = samples[sample_count - 1];
        T_printf("M:MX:%s\n", T_ticks_to_string_ns(v, ts));    /* maximum */
        v = median_absolute_deviation(samples, sample_count);
        T_printf("M:MAD:%s\n", T_ticks_to_string_ns(v, ts));
        T_printf("M:D:%s\n", T_time_to_string_ns(a, ts));      /* sum of samples */
        T_printf("M:E:%s:D:%s\n", req->name, T_time_to_string_ns(d, ts));
static void
fill_data_cache(volatile unsigned int *chunk, size_t chunk_size,
    size_t cache_line_size)
{
        size_t m;
        size_t k;
        size_t i;

        m = chunk_size / sizeof(chunk[0]);
        k = cache_line_size / sizeof(chunk[0]);

        /* read one word per cache line so the cache is valid but not dirty */
        for (i = 0; i < m; i += k) {
                chunk[i];
        }
}
static void
dirty_call(void (*body)(void *), void *arg)
{
        void *space;
        /* move the stack pointer so the body runs on a fresh stack area */
        space = alloca(1024);
        RTEMS_OBFUSCATE_VARIABLE(space);
        (*body)(arg);
}
/* fragment: run the optional per-request setup handler */
        if (req->setup != NULL) {
                /* ... */
        }

static bool
teardown(const T_measure_runtime_request *req, void *arg, T_ticks *delta,
    uint32_t tic, uint32_t toc, unsigned int retry,
    unsigned int maximum_retries)
{
        if (req->teardown == NULL) {
                /* accept the sample if no clock tick intervened or the
                   retries are exhausted */
                return tic == toc || retry >= maximum_retries;
        }

        return (*req->teardown)(arg, delta, tic, toc, retry);
}

/* get_maximum_retries() body (name taken from the call sites below) */
        return (req->flags & T_MEASURE_RUNTIME_ALLOW_CLOCK_ISR) != 0 ? 1 : 0;
/* measure_valid_cache() fragment: before each sample the data cache is
   filled with unrelated lines from the chunk, so it is valid but holds no
   data of the body */
        void (*body)(void *);
        /* ... */
        begin = measure_variant_begin(req->name, "ValidCache");
        /* ... */
        sample_count = ctx->sample_count;
        samples = ctx->samples;
        /* ... */
        for (i = 0; i < sample_count; ++i) {
                unsigned int maximum_retries;
                /* ... */
                maximum_retries = get_maximum_retries(req);
                /* ... */
                fill_data_cache(ctx->chunk, ctx->chunk_size,
                    ctx->cache_line_size);
                /* ... time the body ... */
                samples[i] = t1 - t0;

                /* accept the sample or retry the measurement */
                if (teardown(req, arg, &samples[i], tic, toc, retry,
                    maximum_retries)) {
                        /* ... */
                }
                /* ... */
        }
        /* ... */
        measure_variant_end(ctx, req, begin);
/* measure_hot_cache() fragment: the body runs twice per sample; the first,
   discarded run warms the cache and the second run is recorded */
        void (*body)(void *);
        /* ... */
        begin = measure_variant_begin(req->name, "HotCache");
        /* ... */
        sample_count = ctx->sample_count;
        samples = ctx->samples;
        /* ... */
        for (i = 0; i < sample_count; ++i) {
                unsigned int maximum_retries;
                /* ... */
                maximum_retries = get_maximum_retries(req);
                /* ... first, cache-warming run ... */
                samples[i] = t1 - t0;
                (void)teardown(req, arg, &samples[i], tic, toc, retry,
                    maximum_retries);
                /* ... second, measured run ... */
                samples[i] = t1 - t0;

                if (teardown(req, arg, &samples[i], tic, toc, retry,
                    maximum_retries)) {
                        /* ... */
                }
                /* ... */
        }
        /* ... */
        measure_variant_end(ctx, req, begin);
/* measure_dirty_cache() fragment: the whole chunk is written before each
   sample so the body starts with a dirty data cache */
        void (*body)(void *);
        /* ... */
        begin = measure_variant_begin(req->name, "DirtyCache");
        /* ... */
        sample_count = ctx->sample_count;
        samples = ctx->samples;
        /* ... */
        for (i = 0; i < sample_count; ++i) {
                unsigned int maximum_retries;
                /* ... */
                maximum_retries = get_maximum_retries(req);
                /* ... */
                token = dirty_data_cache(ctx->chunk, ctx->chunk_size,
                    ctx->cache_line_size, token);
                /* ... */
                dirty_call(body, arg);
                /* ... */
                samples[i] = t1 - t0;

                if (teardown(req, arg, &samples[i], tic, toc, retry,
                    maximum_retries)) {
                        /* ... */
                }
                /* ... */
        }
        /* ... */
        measure_variant_end(ctx, req, begin);
static T_ticks
recursive_load_call(void (*body)(void *), void *arg, int n)
{
        T_ticks delta;

        if (n > 0) {
                /* recurse down to the measured call */
                delta = recursive_load_call(body, arg, n - 1);
        } else {
                /* ... time the call ... */
                dirty_call(body, arg);
                /* ... */
        }

        return delta;
}

static T_ticks
load_call(void (*body)(void *), void *arg)
{
        /* ... time the call ... */
        dirty_call(body, arg);
        /* ... */
}
/* measure_load_variant() fragment: measure the body while load worker
   tasks dirty the data cache in parallel; the load level is reported as
   an M:L line */
        void (*body)(void *);
        /* ... */
        begin = measure_variant_begin(req->name, "Load");
        T_printf("M:L:%" PRIu32 "\n", load + 1);
        /* ... */
        sample_count = ctx->sample_count;
        samples = ctx->samples;
        /* ... */
        restart_worker(lctx);
        /* ... */
        for (i = 0; i < sample_count; ++i) {
                unsigned int maximum_retries;
                /* ... */
                maximum_retries = get_maximum_retries(req);
                /* ... */
                token = dirty_data_cache(ctx->chunk, ctx->chunk_size,
                    ctx->cache_line_size, token);
                /* ... one of: */
                delta = recursive_load_call(body, arg,
                    /* ... */);
                /* ... or: */
                delta = load_call(body, arg);
                /* ... */
                if (teardown(req, arg, &samples[i], tic, toc, retry,
                    maximum_retries)) {
                        /* ... */
                }
                /* ... */
        }
        /* ... */
        measure_variant_end(ctx, req, begin);
/* measure_load() fragment: step through the load levels, then the maximum
   load, then park all workers again */
        for (load = 0; load < ctx->load_count - 1; ++load) {
                lctx = &ctx->load_contexts[load];
                /* ... */
                if ((req->flags &
                    T_MEASURE_RUNTIME_DISABLE_MINOR_LOAD) == 0) {
                        measure_load_variant(ctx, req, lctx, load);
                } else {
                        restart_worker(lctx);
                }
        }

        if ((req->flags & T_MEASURE_RUNTIME_DISABLE_MAX_LOAD) == 0) {
                load = ctx->load_count - 1;
                lctx = &ctx->load_contexts[load];
                /* ... */
                measure_load_variant(ctx, req, lctx, load);
        }

        for (load = 0; load < ctx->load_count; ++load) {
                lctx = &ctx->load_contexts[load];
                /* ... */
                suspend_worker(lctx);
        }
/* T_measure_runtime() fragment: run the requested variants */
        if ((req->flags & T_MEASURE_RUNTIME_DISABLE_VALID_CACHE) == 0) {
                measure_valid_cache(ctx, req);
        }

        if ((req->flags & T_MEASURE_RUNTIME_DISABLE_HOT_CACHE) == 0) {
                measure_hot_cache(ctx, req);
        }

        if ((req->flags & T_MEASURE_RUNTIME_DISABLE_DIRTY_CACHE) == 0) {
                measure_dirty_cache(ctx, req);
        }

        measure_load(ctx, req);
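/*
 * Hedged usage sketch (not part of the file above): one way a test case
 * could drive the measure runtime.  The config and request member names
 * (sample_count, name, flags, body) follow the usage seen in the code
 * above; the test case name "measure_example" and the body function are
 * made up for illustration.
 */
#include <rtems/test.h>

static void
measured_body(void *arg)
{
        (void)arg;
        /* code whose runtime shall be measured */
}

T_TEST_CASE(measure_example)
{
        static const T_measure_runtime_config config = {
                .sample_count = 100
        };
        T_measure_runtime_request req = {
                .name = "Example",
                .flags = T_MEASURE_RUNTIME_REPORT_SAMPLES,
                .body = measured_body
        };
        T_measure_runtime_context *ctx;

        ctx = T_measure_runtime_create(&config);

        if (ctx == NULL) {
                /* allocation failed, nothing to measure */
                return;
        }

        T_measure_runtime(ctx, &req);
}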