RTEMS 6.1-rc4
xz_config.h
/*
 * Private includes and definitions for userspace use of XZ Embedded
 *
 * Author: Lasse Collin <lasse.collin@tukaani.org>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

#ifndef XZ_CONFIG_H
#define XZ_CONFIG_H

/* Uncomment to enable CRC64 support. */
/* #define XZ_USE_CRC64 */

/* Uncomment as needed to enable BCJ filter decoders. */
/* #define XZ_DEC_X86 */
/* #define XZ_DEC_POWERPC */
/* #define XZ_DEC_IA64 */
/* #define XZ_DEC_ARM */
/* #define XZ_DEC_ARMTHUMB */
/* #define XZ_DEC_SPARC */
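
/*
 * Illustrative sketch, not part of upstream xz_config.h: a build that
 * needs, say, the x86 and ARM BCJ filter decoders would uncomment the
 * matching lines above, or equivalently define the macros on the
 * compiler command line (e.g. -DXZ_DEC_X86 -DXZ_DEC_ARM). The block
 * below is kept inert and only shows the enabled configuration.
 */
#if 0
#define XZ_DEC_X86
#define XZ_DEC_ARM
#endif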

/*
 * MSVC doesn't support modern C but XZ Embedded is mostly C89
 * so these are enough.
 */
#ifdef _MSC_VER
typedef unsigned char bool;
# define true 1
# define false 0
# define inline __inline
#else
# include <stdbool.h>
#endif

#include <stdlib.h>
#include <string.h>

#include "xz.h"

#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) free(ptr)

#define memeq(a, b, size) (memcmp(a, b, size) == 0)
#define memzero(buf, size) memset(buf, 0, size)

#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#define min_t(type, x, y) min(x, y)
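
/*
 * Illustrative sketch, not part of upstream xz_config.h: the macros
 * above exist so that the Linux-flavoured XZ Embedded sources compile
 * unchanged in userspace. For example, a kernel-style allocation and
 * clear expands to plain malloc() and memset(); GFP_KERNEL is only a
 * placeholder here because the flags argument is discarded.
 */
#if 0
static void *example_alloc_state(size_t size)
{
        void *s = kmalloc(size, GFP_KERNEL); /* expands to malloc(size) */

        if (s != NULL)
                memzero(s, size); /* expands to memset(s, 0, size) */

        return s;
}
#endif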

/*
 * Some functions have been marked with __always_inline to keep the
 * performance reasonable even when the compiler is optimizing for
 * small code size. You may be able to save a few bytes by #defining
 * __always_inline to plain inline, but don't complain if the code
 * becomes slow.
 *
 * NOTE: System headers on GNU/Linux may #define this macro already,
 * so if you want to change it, you need to #undef it first.
 */
#ifndef __always_inline
# ifdef __GNUC__
#  define __always_inline \
                inline __attribute__((__always_inline__))
# else
#  define __always_inline inline
# endif
#endif
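
/*
 * Illustrative sketch, not part of upstream xz_config.h: trading speed
 * for code size as described in the note above. The #undef is required
 * because system headers on GNU/Linux may define the macro already.
 */
#if 0
#undef __always_inline
#define __always_inline inline
#endif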

/* Inline functions to access unaligned unsigned 32-bit integers */
#ifndef get_unaligned_le32
static inline uint32_t get_unaligned_le32(const uint8_t *buf)
{
        return (uint32_t)buf[0]
                        | ((uint32_t)buf[1] << 8)
                        | ((uint32_t)buf[2] << 16)
                        | ((uint32_t)buf[3] << 24);
}
#endif

#ifndef get_unaligned_be32
static inline uint32_t get_unaligned_be32(const uint8_t *buf)
{
        return ((uint32_t)buf[0] << 24)
                        | ((uint32_t)buf[1] << 16)
                        | ((uint32_t)buf[2] << 8)
                        | (uint32_t)buf[3];
}
#endif

#ifndef put_unaligned_le32
static inline void put_unaligned_le32(uint32_t val, uint8_t *buf)
{
        buf[0] = (uint8_t)val;
        buf[1] = (uint8_t)(val >> 8);
        buf[2] = (uint8_t)(val >> 16);
        buf[3] = (uint8_t)(val >> 24);
}
#endif

#ifndef put_unaligned_be32
static inline void put_unaligned_be32(uint32_t val, uint8_t *buf)
{
        buf[0] = (uint8_t)(val >> 24);
        buf[1] = (uint8_t)(val >> 16);
        buf[2] = (uint8_t)(val >> 8);
        buf[3] = (uint8_t)val;
}
#endif
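
/*
 * Usage sketch, illustrative only and not part of upstream xz_config.h:
 * read a 32-bit little endian field from an arbitrarily aligned byte
 * buffer and write the same value back in big endian byte order.
 */
#if 0
static void example_unaligned_roundtrip(uint8_t *buf)
{
        uint32_t val = get_unaligned_le32(buf);

        put_unaligned_be32(val, buf);
}
#endif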

/*
 * Use get_unaligned_le32() also for aligned access for simplicity. On
 * little endian systems, #define get_le32(ptr) (*(const uint32_t *)(ptr))
 * could save a few bytes in code size.
 */
#ifndef get_le32
# define get_le32 get_unaligned_le32
#endif
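
/*
 * Illustrative sketch, not part of upstream xz_config.h: the code-size
 * optimization mentioned above, for a known little endian target where
 * get_le32() is only ever used on properly aligned data.
 */
#if 0
#undef get_le32
#define get_le32(ptr) (*(const uint32_t *)(ptr))
#endif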

#endif