File: build-scan/../src/import/qcow2-util.c
Warning: line 253, column 25: Potential leak of memory pointed to by 'l1_table'
1 | /* SPDX-License-Identifier: LGPL-2.1+ */
2 |
3 | #include <zlib.h>
4 |
5 | #include "alloc-util.h"
6 | #include "btrfs-util.h"
7 | #include "qcow2-util.h"
8 | #include "sparse-endian.h"
9 | #include "util.h"
10 |
11 | #define QCOW2_MAGIC 0x514649fb
12 |
13 | #define QCOW2_COPIED     (1ULL << 63)
14 | #define QCOW2_COMPRESSED (1ULL << 62)
15 | #define QCOW2_ZERO       (1ULL << 0)
16 |
17 | typedef struct _packed_ Header {
18 |         be32_t magic;
19 |         be32_t version;
20 |
21 |         be64_t backing_file_offset;
22 |         be32_t backing_file_size;
23 |
24 |         be32_t cluster_bits;
25 |         be64_t size;
26 |         be32_t crypt_method;
27 |
28 |         be32_t l1_size;
29 |         be64_t l1_table_offset;
30 |
31 |         be64_t refcount_table_offset;
32 |         be32_t refcount_table_clusters;
33 |
34 |         be32_t nb_snapshots;
35 |         be64_t snapshots_offset;
36 |
37 |         /* The remainder is only present on QCOW3 */
38 |         be64_t incompatible_features;
39 |         be64_t compatible_features;
40 |         be64_t autoclear_features;
41 |
42 |         be32_t refcount_order;
43 |         be32_t header_length;
44 | } Header;
45 |
46 | #define HEADER_MAGIC(header) be32toh((header)->magic)
47 | #define HEADER_VERSION(header) be32toh((header)->version)
48 | #define HEADER_CLUSTER_BITS(header) be32toh((header)->cluster_bits)
49 | #define HEADER_CLUSTER_SIZE(header) (1ULL << HEADER_CLUSTER_BITS(header))
50 | #define HEADER_L2_BITS(header) (HEADER_CLUSTER_BITS(header) - 3)
51 | #define HEADER_SIZE(header) be64toh((header)->size)
52 | #define HEADER_CRYPT_METHOD(header) be32toh((header)->crypt_method)
53 | #define HEADER_L1_SIZE(header) be32toh((header)->l1_size)
54 | #define HEADER_L2_SIZE(header) (HEADER_CLUSTER_SIZE(header)/sizeof(uint64_t))
55 | #define HEADER_L1_TABLE_OFFSET(header) be64toh((header)->l1_table_offset)
56 |
57 | static uint32_t HEADER_HEADER_LENGTH(const Header *h) {
58 |         if (HEADER_VERSION(h) < 3)
59 |                 return offsetof(Header, incompatible_features);
60 |
61 |         return be32toh(h->header_length);
62 | }
63 |
64 | static int copy_cluster(
65 |                 int sfd, uint64_t soffset,
66 |                 int dfd, uint64_t doffset,
67 |                 uint64_t cluster_size,
68 |                 void *buffer) {
69 |
70 |         ssize_t l;
71 |         int r;
72 |
73 |         r = btrfs_clone_range(sfd, soffset, dfd, doffset, cluster_size);
74 |         if (r >= 0)
75 |                 return r;
76 |
77 |         l = pread(sfd, buffer, cluster_size, soffset);
78 |         if (l < 0)
79 |                 return -errno;
80 |         if ((uint64_t) l != cluster_size)
81 |                 return -EIO;
82 |
83 |         l = pwrite(dfd, buffer, cluster_size, doffset);
84 |         if (l < 0)
85 |                 return -errno;
86 |         if ((uint64_t) l != cluster_size)
87 |                 return -EIO;
88 |
89 |         return 0;
90 | }
91 |
92 | static int decompress_cluster(
93 |                 int sfd, uint64_t soffset,
94 |                 int dfd, uint64_t doffset,
95 |                 uint64_t compressed_size,
96 |                 uint64_t cluster_size,
97 |                 void *buffer1,
98 |                 void *buffer2) {
99 |
100 |         _cleanup_free_ void *large_buffer = NULL;
101 |         z_stream s = {};
102 |         uint64_t sz;
103 |         ssize_t l;
104 |         int r;
105 |
106 |         if (compressed_size > cluster_size) {
107 |                 /* The usual cluster buffer doesn't suffice, let's
108 |                  * allocate a larger one, temporarily */
109 |
110 |                 large_buffer = malloc(compressed_size);
111 |                 if (!large_buffer)
112 |                         return -ENOMEM;
113 |
114 |                 buffer1 = large_buffer;
115 |         }
116 |
117 |         l = pread(sfd, buffer1, compressed_size, soffset);
118 |         if (l < 0)
119 |                 return -errno;
120 |         if ((uint64_t) l != compressed_size)
121 |                 return -EIO;
122 |
123 |         s.next_in = buffer1;
124 |         s.avail_in = compressed_size;
125 |         s.next_out = buffer2;
126 |         s.avail_out = cluster_size;
127 |
128 |         r = inflateInit2(&s, -12);
129 |         if (r != Z_OK)
130 |                 return -EIO;
131 |
132 |         r = inflate(&s, Z_FINISH);
133 |         sz = (uint8_t*) s.next_out - (uint8_t*) buffer2;
134 |         inflateEnd(&s);
135 |         if (r != Z_STREAM_END || sz != cluster_size)
136 |                 return -EIO;
137 |
138 |         l = pwrite(dfd, buffer2, cluster_size, doffset);
139 |         if (l < 0)
140 |                 return -errno;
141 |         if ((uint64_t) l != cluster_size)
142 |                 return -EIO;
143 |
144 |         return 0;
145 | }
146 |
147 | static int normalize_offset(
148 |                 const Header *header,
149 |                 uint64_t p,
150 |                 uint64_t *ret,
151 |                 bool *compressed,
152 |                 uint64_t *compressed_size) {
153 |
154 |         uint64_t q;
155 |
156 |         q = be64toh(p);
157 |
158 |         if (q & QCOW2_COMPRESSED) {
159 |                 uint64_t sz, csize_shift, csize_mask;
160 |
161 |                 if (!compressed)
162 |                         return -EOPNOTSUPP;
163 |
164 |                 csize_shift = 64 - 2 - (HEADER_CLUSTER_BITS(header) - 8);
165 |                 csize_mask = (1ULL << (HEADER_CLUSTER_BITS(header) - 8)) - 1;
166 |                 sz = (((q >> csize_shift) & csize_mask) + 1) * 512 - (q & 511);
167 |                 q &= ((1ULL << csize_shift) - 1);
168 |
169 |                 if (compressed_size)
170 |                         *compressed_size = sz;
171 |
172 |                 *compressed = true;
173 |
174 |         } else {
175 |                 if (compressed) {
176 |                         *compressed = false;
177 |                         *compressed_size = 0;
178 |                 }
179 |
180 |                 if (q & QCOW2_ZERO) {
181 |                         /* We make no distinction between zero blocks and holes */
182 |                         *ret = 0;
183 |                         return 0;
184 |                 }
185 |
186 |                 q &= ~QCOW2_COPIED;
187 |         }
188 |
189 |         *ret = q;
190 |         return q > 0; /* returns positive if not a hole */
191 | }
192 |
193 | static int verify_header(const Header *header) {
194 |         assert(header);
195 |
196 |         if (HEADER_MAGIC(header) != QCOW2_MAGIC)
197 |                 return -EBADMSG;
198 |
199 |         if (!IN_SET(HEADER_VERSION(header), 2, 3))
200 |                 return -EOPNOTSUPP;
201 |
202 |         if (HEADER_CRYPT_METHOD(header) != 0)
203 |                 return -EOPNOTSUPP;
204 |
205 |         if (HEADER_CLUSTER_BITS(header) < 9) /* 512 bytes */
206 |                 return -EBADMSG;
207 |
208 |         if (HEADER_CLUSTER_BITS(header) > 21) /* 2MB */
209 |                 return -EBADMSG;
210 |
211 |         if (HEADER_SIZE(header) % HEADER_CLUSTER_SIZE(header) != 0)
212 |                 return -EBADMSG;
213 |
214 |         if (HEADER_L1_SIZE(header) > 32*1024*1024) /* 32MB */
215 |                 return -EBADMSG;
216 |
217 |         if (HEADER_VERSION(header) == 3) {
218 |
219 |                 if (header->incompatible_features != 0)
220 |                         return -EOPNOTSUPP;
221 |
222 |                 if (HEADER_HEADER_LENGTH(header) < sizeof(Header))
223 |                         return -EBADMSG;
224 |         }
225 |
226 |         return 0;
227 | }
228 |
229 | int qcow2_convert(int qcow2_fd, int raw_fd) {
230 |         _cleanup_free_ void *buffer1 = NULL, *buffer2 = NULL;
231 |         _cleanup_free_ be64_t *l1_table = NULL, *l2_table = NULL;
232 |         uint64_t sz, i;
233 |         Header header;
234 |         ssize_t l;
235 |         int r;
236 |
237 |         l = pread(qcow2_fd, &header, sizeof(header), 0);
238 |         if (l < 0)
239 |                 return -errno;
240 |         if (l != sizeof(header))
241 |                 return -EIO;
242 |
243 |         r = verify_header(&header);
244 |         if (r < 0)
245 |                 return r;
246 |
247 |         l1_table = new(be64_t, HEADER_L1_SIZE(&header));
248 |         if (!l1_table)
249 |                 return -ENOMEM;
250 |
251 |         l2_table = malloc(HEADER_CLUSTER_SIZE(&header));
252 |         if (!l2_table)
253 |                 return -ENOMEM;
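    | /* note (not part of the source file): line 253 above is the statement flagged
    |  * in the warning at the top of this report. If the l2_table allocation fails,
    |  * qcow2_convert() returns here; however, l1_table and l2_table are declared
    |  * _cleanup_free_ on line 231, so freep() still frees them on this return path.
    |  * The reported leak is therefore most likely a false positive, assuming the
    |  * analyzer does not model __attribute__((cleanup)). */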
254 |
255 |         buffer1 = malloc(HEADER_CLUSTER_SIZE(&header));
256 |         if (!buffer1)
257 |                 return -ENOMEM;
258 |
259 |         buffer2 = malloc(HEADER_CLUSTER_SIZE(&header));
260 |         if (!buffer2)
261 |                 return -ENOMEM;
262 |
263 |         /* Empty the file if it exists, we rely on zero bits */
264 |         if (ftruncate(raw_fd, 0) < 0)
265 |                 return -errno;
266 |
267 |         if (ftruncate(raw_fd, HEADER_SIZE(&header)) < 0)
268 |                 return -errno;
269 |
270 |         sz = sizeof(uint64_t) * HEADER_L1_SIZE(&header);
271 |         l = pread(qcow2_fd, l1_table, sz, HEADER_L1_TABLE_OFFSET(&header));
272 |         if (l < 0)
273 |                 return -errno;
274 |         if ((uint64_t) l != sz)
275 |                 return -EIO;
276 |
277 |         for (i = 0; i < HEADER_L1_SIZE(&header); i ++) {
278 |                 uint64_t l2_begin, j;
279 |
280 |                 r = normalize_offset(&header, l1_table[i], &l2_begin, NULL, NULL);
281 |                 if (r < 0)
282 |                         return r;
283 |                 if (r == 0)
284 |                         continue;
285 |
286 |                 l = pread(qcow2_fd, l2_table, HEADER_CLUSTER_SIZE(&header), l2_begin);
287 |                 if (l < 0)
288 |                         return -errno;
289 |                 if ((uint64_t) l != HEADER_CLUSTER_SIZE(&header))
290 |                         return -EIO;
291 |
292 |                 for (j = 0; j < HEADER_L2_SIZE(&header); j++) {
293 |                         uint64_t data_begin, p, compressed_size;
294 |                         bool compressed;
295 |
296 |                         p = ((i << HEADER_L2_BITS(&header)) + j) << HEADER_CLUSTER_BITS(&header);
297 |
298 |                         r = normalize_offset(&header, l2_table[j], &data_begin, &compressed, &compressed_size);
299 |                         if (r < 0)
300 |                                 return r;
301 |                         if (r == 0)
302 |                                 continue;
303 |
304 |                         if (compressed)
305 |                                 r = decompress_cluster(
306 |                                                 qcow2_fd, data_begin,
307 |                                                 raw_fd, p,
308 |                                                 compressed_size, HEADER_CLUSTER_SIZE(&header),
309 |                                                 buffer1, buffer2);
310 |                         else
311 |                                 r = copy_cluster(
312 |                                                 qcow2_fd, data_begin,
313 |                                                 raw_fd, p,
314 |                                                 HEADER_CLUSTER_SIZE(&header), buffer1);
315 |                         if (r < 0)
316 |                                 return r;
317 |                 }
318 |         }
319 |
320 |         return 0;
321 | }
322 |
323 | int qcow2_detect(int fd) {
324 |         be32_t id;
325 |         ssize_t l;
326 |
327 |         l = pread(fd, &id, sizeof(id), 0);
328 |         if (l < 0)
329 |                 return -errno;
330 |         if (l != sizeof(id))
331 |                 return -EIO;
332 |
333 |         return htobe32(QCOW2_MAGIC) == id;
334 | }
File: ../src/basic/alloc-util.h
1 | /* SPDX-License-Identifier: LGPL-2.1+ */
2 | #pragma once
3 |
4 | #include <alloca.h>
5 | #include <stddef.h>
6 | #include <stdlib.h>
7 | #include <string.h>
8 |
9 | #include "macro.h"
10 |
11 | #define new(t, n) ((t*) malloc_multiply(sizeof(t), (n)))
12 |
13 | #define new0(t, n) ((t*) calloc((n), sizeof(t)))
14 |
15 | #define newa(t, n)                                              \
16 |         ({                                                      \
17 |                 assert(!size_multiply_overflow(sizeof(t), n));  \
18 |                 (t*) alloca(sizeof(t)*(n));                     \
19 |         })
20 |
21 | #define newa0(t, n)                                             \
22 |         ({                                                      \
23 |                 assert(!size_multiply_overflow(sizeof(t), n));  \
24 |                 (t*) alloca0(sizeof(t)*(n));                    \
25 |         })
26 |
27 | #define newdup(t, p, n) ((t*) memdup_multiply(p, sizeof(t), (n)))
28 |
29 | #define newdup_suffix0(t, p, n) ((t*) memdup_suffix0_multiply(p, sizeof(t), (n)))
30 |
31 | #define malloc0(n) (calloc(1, (n)))
32 |
33 | static inline void *mfree(void *memory) {
34 |         free(memory);
35 |         return NULL;
36 | }
37 |
38 | #define free_and_replace(a, b)          \
39 |         ({                              \
40 |                 free(a);                \
41 |                 (a) = (b);              \
42 |                 (b) = NULL;             \
43 |                 0;                      \
44 |         })
45 |
46 | void* memdup(const void *p, size_t l) _alloc_(2);
47 | void* memdup_suffix0(const void *p, size_t l) _alloc_(2);
48 |
49 | static inline void freep(void *p) {
50 |         free(*(void**) p);
51 | }
52 |
53 | #define _cleanup_free_ _cleanup_(freep)
54 |
55 | static inline bool size_multiply_overflow(size_t size, size_t need) {
56 |         return _unlikely_(need != 0 && size > (SIZE_MAX / need));
57 | }
58 |
59 | _malloc_ _alloc_(1, 2) static inline void *malloc_multiply(size_t size, size_t need) {
60 |         if (size_multiply_overflow(size, need))
61 |                 return NULL;
62 |
63 |         return malloc(size * need);
64 | }
65 |
66 | #if !HAVE_REALLOCARRAY
67 | _alloc_(2, 3) static inline void *reallocarray(void *p, size_t need, size_t size) {
68 |         if (size_multiply_overflow(size, need))
69 |                 return NULL;
70 |
71 |         return realloc(p, size * need);
72 | }
73 | #endif
74 |
75 | _alloc_(2, 3) static inline void *memdup_multiply(const void *p, size_t size, size_t need) {
76 |         if (size_multiply_overflow(size, need))
77 |                 return NULL;
78 |
79 |         return memdup(p, size * need);
80 | }
81 |
82 | _alloc_(2, 3) static inline void *memdup_suffix0_multiply(const void *p, size_t size, size_t need) {
83 |         if (size_multiply_overflow(size, need))
84 |                 return NULL;
85 |
86 |         return memdup_suffix0(p, size * need);
87 | }
88 |
89 | void* greedy_realloc(void **p, size_t *allocated, size_t need, size_t size);
90 | void* greedy_realloc0(void **p, size_t *allocated, size_t need, size_t size);
91 |
92 | #define GREEDY_REALLOC(array, allocated, need)                          \
93 |         greedy_realloc((void**) &(array), &(allocated), (need), sizeof((array)[0]))
94 |
95 | #define GREEDY_REALLOC0(array, allocated, need)                         \
96 |         greedy_realloc0((void**) &(array), &(allocated), (need), sizeof((array)[0]))
97 |
98 | #define alloca0(n)                                      \
99 |         ({                                              \
100 |                 char *_new_;                            \
101 |                 size_t _len_ = n;                       \
102 |                 _new_ = alloca(_len_);                  \
103 |                 (void *) memset(_new_, 0, _len_);       \
104 |         })
105 |
106 | /* It's not clear what alignment glibc/gcc alloca() guarantees, hence provide a guaranteed safe version */
107 | #define alloca_align(size, align)                                       \
108 |         ({                                                              \
109 |                 void *_ptr_;                                            \
110 |                 size_t _mask_ = (align) - 1;                            \
111 |                 _ptr_ = alloca((size) + _mask_);                        \
112 |                 (void*)(((uintptr_t)_ptr_ + _mask_) & ~_mask_);         \
113 |         })
114 |
115 | #define alloca0_align(size, align)                                      \
116 |         ({                                                              \
117 |                 void *_new_;                                            \
118 |                 size_t _size_ = (size);                                 \
119 |                 _new_ = alloca_align(_size_, (align));                  \
120 |                 (void*)memset(_new_, 0, _size_);                        \
121 |         })
122 |
123 | /* Takes inspiration from Rust's Option::take() method: reads and returns a pointer, but at the same time resets it to
124 |  * NULL. See: https://doc.rust-lang.org/std/option/enum.Option.html#method.take */
125 | #define TAKE_PTR(ptr)                           \
126 |         ({                                      \
127 |                 typeof(ptr) _ptr_ = (ptr);      \
128 |                 (ptr) = NULL;                   \
129 |                 _ptr_;                          \
130 |         })
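To show how these helpers combine in practice, here is a small illustrative sketch; the function allocate_l1() and its parameters are hypothetical and appear in neither file. new() is the overflow-checked array allocator, _cleanup_free_ arranges for freep() to run when the variable goes out of scope, and TAKE_PTR() hands ownership to the caller while disarming that cleanup.

/* Illustrative sketch only; allocate_l1() and its parameters are hypothetical. */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include "alloc-util.h"

static int allocate_l1(size_t n_entries, size_t cluster_size, uint64_t **ret) {
        _cleanup_free_ uint64_t *l1_table = NULL;
        _cleanup_free_ void *scratch = NULL;

        l1_table = new(uint64_t, n_entries);    /* overflow-checked malloc */
        if (!l1_table)
                return -ENOMEM;

        scratch = malloc0(cluster_size);
        if (!scratch)
                return -ENOMEM;                 /* freep() still frees l1_table here */

        /* ... fill l1_table using scratch ... */

        *ret = TAKE_PTR(l1_table);              /* caller now owns the table; cleanup sees NULL */
        return 0;                               /* scratch is freed automatically */
}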